171 files changed, 31301 insertions, 987 deletions
diff --git a/.github/workflows/llvm-bugs.yml b/.github/workflows/llvm-bugs.yml index cd3f396..3274f1a 100644 --- a/.github/workflows/llvm-bugs.yml +++ b/.github/workflows/llvm-bugs.yml @@ -52,7 +52,7 @@ jobs:                  url    : issue.data.html_url,                  labels : issue.data.labels.map((label) => label.name),                  assignee : issue.data.assignees.map((assignee) => assignee.login), -                body   : issue.data.body +                body   : maybeTruncatedBody                };                const data = { diff --git a/.github/workflows/release-binaries.yml b/.github/workflows/release-binaries.yml index acc4723..25f426b 100644 --- a/.github/workflows/release-binaries.yml +++ b/.github/workflows/release-binaries.yml @@ -138,7 +138,6 @@ jobs:            target_cmake_flags="$target_cmake_flags -DLLVM_RELEASE_ENABLE_LTO=OFF"          fi -        echo "target-cmake-flags=$target_cmake_flags" >> $GITHUB_OUTPUT          case "${{ inputs.runs-on }}" in            ubuntu-22.04*)              build_runs_on="depot-${{ inputs.runs-on }}-16" @@ -157,6 +156,23 @@ jobs:              build_runs_on=$test_runs_on              ;;          esac + +        case "$build_runs_on" in +          # These runners cannot build the full release package faster than +          # the 6 hours timeout limit, so we need to use a configuration +          # that builds more quickly. +          macos-14) +            bootstrap_prefix="BOOTSTRAP" +            target_cmake_flags="$target_cmake_flags -DLLVM_RELEASE_ENABLE_LTO=OFF -DLLVM_RELEASE_ENABLE_PGO=OFF" +            ;; +          *) +            bootstrap_prefix="BOOTSTRAP_BOOTSTRAP" +            ;; +        esac + +        target_cmake_flags="$target_cmake_flags -D${bootstrap_prefix}_CPACK_PACKAGE_FILE_NAME=$release_binary_basename" + +        echo "target-cmake-flags=$target_cmake_flags" >> $GITHUB_OUTPUT          echo "build-runs-on=$build_runs_on" >> $GITHUB_OUTPUT          echo "test-runs-on=$test_runs_on" >> $GITHUB_OUTPUT @@ -200,8 +216,7 @@ jobs:          # so we need to set some extra cmake flags to disable this.          cmake -G Ninja -S llvm -B ${{ steps.setup-stage.outputs.build-prefix }}/build \              ${{ needs.prepare.outputs.target-cmake-flags }} \ -            -C clang/cmake/caches/Release.cmake \ -            -DBOOTSTRAP_BOOTSTRAP_CPACK_PACKAGE_FILE_NAME="${{ needs.prepare.outputs.release-binary-basename }}" +            -C clang/cmake/caches/Release.cmake      - name: Build        shell: bash diff --git a/bolt/README.md b/bolt/README.md index 902d1eb6..55f742c 100644 --- a/bolt/README.md +++ b/bolt/README.md @@ -173,7 +173,7 @@ Once you have `perf.fdata` ready, you can use it for optimizations with  BOLT. 
Assuming your environment is setup to include the right path, execute  `llvm-bolt`:  ``` -$ llvm-bolt <executable> -o <executable>.bolt -data=perf.fdata -reorder-blocks=ext-tsp -reorder-functions=hfsort -split-functions -split-all-cold -split-eh -dyno-stats +$ llvm-bolt <executable> -o <executable>.bolt -data=perf.fdata -reorder-blocks=ext-tsp -reorder-functions=cdsort -split-functions -split-all-cold -split-eh -dyno-stats  ```  If you do need an updated debug info, then add `-update-debug-sections` option diff --git a/bolt/lib/Core/BinaryContext.cpp b/bolt/lib/Core/BinaryContext.cpp index c7cd034a..7af32c8 100644 --- a/bolt/lib/Core/BinaryContext.cpp +++ b/bolt/lib/Core/BinaryContext.cpp @@ -78,6 +78,11 @@ cl::opt<std::string> CompDirOverride(               "to *.dwo files."),      cl::Hidden, cl::init(""), cl::cat(BoltCategory)); +static cl::opt<bool> CloneConstantIsland("clone-constant-island", +                                         cl::desc("clone constant islands"), +                                         cl::Hidden, cl::init(true), +                                         cl::ZeroOrMore, cl::cat(BoltCategory)); +  static cl::opt<bool>      FailOnInvalidPadding("fail-on-invalid-padding", cl::Hidden, cl::init(false),                           cl::desc("treat invalid code padding as error"), @@ -461,7 +466,8 @@ BinaryContext::handleAddressRef(uint64_t Address, BinaryFunction &BF,        // of dynamic relocs, as we currently do not support cloning them.        // Notice: we might fail to link because of this, if the original constant        // island we are referring would be emitted too far away. -      if (IslandIter->second->hasDynamicRelocationAtIsland()) { +      if (IslandIter->second->hasDynamicRelocationAtIsland() || +          !opts::CloneConstantIsland) {          MCSymbol *IslandSym =              IslandIter->second->getOrCreateIslandAccess(Address);          if (IslandSym) @@ -469,6 +475,12 @@ BinaryContext::handleAddressRef(uint64_t Address, BinaryFunction &BF,        } else if (MCSymbol *IslandSym =                       IslandIter->second->getOrCreateProxyIslandAccess(Address,                                                                        BF)) { +        LLVM_DEBUG( +            dbgs() << "BOLT-DEBUG: clone constant island at address 0x" +                   << Twine::utohexstr(IslandIter->first) << " with size of 0x" +                   << Twine::utohexstr( +                          IslandIter->second->estimateConstantIslandSize()) +                   << " bytes, referenced by " << BF << "\n");          BF.createIslandDependency(IslandSym, IslandIter->second);          return std::make_pair(IslandSym, 0);        } diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst index cd27239..2e2c519 100644 --- a/clang/docs/ReleaseNotes.rst +++ b/clang/docs/ReleaseNotes.rst @@ -395,6 +395,8 @@ Improvements to Clang's diagnostics    that were previously incorrectly accepted in case of other irrelevant    conditions are now consistently diagnosed, identical to C++ mode. +- Fix false-positive unused label diagnostic when a label is used in a named break +  or continue (#GH166013)  - Clang now emits a diagnostic in case `vector_size` or `ext_vector_type`    attributes are used with a negative size (#GH165463). @@ -460,6 +462,7 @@ Bug Fixes to Attribute Support  - Fix a crash when the function name is empty in the `swift_name` attribute. (#GH157075)  - Fixes crashes or missing diagnostics with the `device_kernel` attribute. 
(#GH161905)  - Fix handling of parameter indexes when an attribute is applied to a C++23 explicit object member function. +- Fixed several false positives and false negatives in function effect (`nonblocking`) analysis. (#GH166078) (#GH166101) (#GH166110)  Bug Fixes to C++ Support  ^^^^^^^^^^^^^^^^^^^^^^^^ @@ -504,6 +507,7 @@ Bug Fixes to C++ Support    nontrivial member when another member has an initializer. (#GH81774)  - Fixed a template depth issue when parsing lambdas inside a type constraint. (#GH162092)  - Diagnose unresolved overload sets in non-dependent compound requirements. (#GH51246) (#GH97753) +- Fix a crash when extracting unavailable member type from alias in template deduction. (#GH165560)  Bug Fixes to AST Handling  ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index 5cf3327..11e81e0 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -951,9 +951,9 @@ def Xarch__    the host system, which can be used to suppress incompatible GPU arguments.}]>,        MetaVarName<"<arch> <arg>">;  def Xarch_host : Separate<["-"], "Xarch_host">, Flags<[NoXarchOption]>, -  HelpText<"Pass <arg> to the CUDA/HIP host compilation">, MetaVarName<"<arg>">; +  HelpText<"Pass <arg> to host compilation in the offloading toolchain">, MetaVarName<"<arg>">;  def Xarch_device : Separate<["-"], "Xarch_device">, Flags<[NoXarchOption]>, -  HelpText<"Pass <arg> to the CUDA/HIP device compilation">, MetaVarName<"<arg>">; +  HelpText<"Pass <arg> to device compilation in the offloading toolchain">, MetaVarName<"<arg>">;  def Xassembler : Separate<["-"], "Xassembler">,    HelpText<"Pass <arg> to the assembler">, MetaVarName<"<arg>">,    Group<CompileOnly_Group>; diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp index af5be95..0fea57b 100644 --- a/clang/lib/CodeGen/CodeGenModule.cpp +++ b/clang/lib/CodeGen/CodeGenModule.cpp @@ -3331,18 +3331,18 @@ static void emitUsed(CodeGenModule &CGM, StringRef Name,    if (List.empty())      return; -  llvm::PointerType *UnqualPtr = -      llvm::PointerType::getUnqual(CGM.getLLVMContext()); -    // Convert List to what ConstantArray needs.    
SmallVector<llvm::Constant*, 8> UsedArray;    UsedArray.resize(List.size());    for (unsigned i = 0, e = List.size(); i != e; ++i) { -    UsedArray[i] = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast( -        cast<llvm::Constant>(&*List[i]), UnqualPtr); +    UsedArray[i] = +        llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast( +            cast<llvm::Constant>(&*List[i]), CGM.Int8PtrTy);    } -  llvm::ArrayType *ATy = llvm::ArrayType::get(UnqualPtr, UsedArray.size()); +  if (UsedArray.empty()) +    return; +  llvm::ArrayType *ATy = llvm::ArrayType::get(CGM.Int8PtrTy, UsedArray.size());    auto *GV = new llvm::GlobalVariable(        CGM.getModule(), ATy, false, llvm::GlobalValue::AppendingLinkage, diff --git a/clang/lib/Format/WhitespaceManager.cpp b/clang/lib/Format/WhitespaceManager.cpp index f24b8ab..406c77c 100644 --- a/clang/lib/Format/WhitespaceManager.cpp +++ b/clang/lib/Format/WhitespaceManager.cpp @@ -591,7 +591,8 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,        CurrentChangeWidthRight = CurrentChange.TokenLength;      const FormatToken *MatchingParenToEncounter = nullptr;      for (unsigned J = I + 1; -         J != E && (Changes[J].NewlinesBefore == 0 || MatchingParenToEncounter); +         J != E && (Changes[J].NewlinesBefore == 0 || +                    MatchingParenToEncounter || Changes[J].IsAligned);           ++J) {        const auto &Change = Changes[J];        const auto *Tok = Change.Tok; diff --git a/clang/lib/Headers/module.modulemap b/clang/lib/Headers/module.modulemap index 2e4d533..c13dd3f 100644 --- a/clang/lib/Headers/module.modulemap +++ b/clang/lib/Headers/module.modulemap @@ -253,6 +253,11 @@ module _Builtin_stdbool [system] {    export *  } +module _Builtin_stdckdint [system] { +  header "stdckdint.h" +  export * +} +  module _Builtin_stdcountof [system] {    header "stdcountof.h"    export * diff --git a/clang/lib/Lex/ModuleMap.cpp b/clang/lib/Lex/ModuleMap.cpp index 637a08f..b8202ea 100644 --- a/clang/lib/Lex/ModuleMap.cpp +++ b/clang/lib/Lex/ModuleMap.cpp @@ -258,6 +258,7 @@ static bool isBuiltinHeaderName(StringRef FileName) {             .Case("stdarg.h", true)             .Case("stdatomic.h", true)             .Case("stdbool.h", true) +           .Case("stdckdint.h", true)             .Case("stdcountof.h", true)             .Case("stddef.h", true)             .Case("stdint.h", true) diff --git a/clang/lib/Sema/SemaFunctionEffects.cpp b/clang/lib/Sema/SemaFunctionEffects.cpp index 8590ee8..4b63eb7 100644 --- a/clang/lib/Sema/SemaFunctionEffects.cpp +++ b/clang/lib/Sema/SemaFunctionEffects.cpp @@ -1208,8 +1208,16 @@ private:          return true;        } -      // No Decl, just an Expr. Just check based on its type. -      checkIndirectCall(Call, CalleeExpr->getType()); +      // No Decl, just an Expr. Just check based on its type. Bound member +      // functions are a special expression type and need to be specially +      // unpacked. 
+      QualType CalleeExprQT = CalleeExpr->getType(); +      if (CalleeExpr->isBoundMemberFunction(Outer.S.getASTContext())) { +        QualType QT = Expr::findBoundMemberType(CalleeExpr); +        if (!QT.isNull()) +          CalleeExprQT = QT; +      } +      checkIndirectCall(Call, CalleeExprQT);        return true;      } @@ -1271,7 +1279,15 @@ private:        const CXXConstructorDecl *Ctor = Construct->getConstructor();        CallableInfo CI(*Ctor);        followCall(CI, Construct->getLocation()); +      return true; +    } +    bool VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *BTE) override { +      const CXXDestructorDecl *Dtor = BTE->getTemporary()->getDestructor(); +      if (Dtor != nullptr) { +        CallableInfo CI(*Dtor); +        followCall(CI, BTE->getBeginLoc()); +      }        return true;      } diff --git a/clang/lib/Sema/SemaStmt.cpp b/clang/lib/Sema/SemaStmt.cpp index f398963..5b3ef1a 100644 --- a/clang/lib/Sema/SemaStmt.cpp +++ b/clang/lib/Sema/SemaStmt.cpp @@ -3281,6 +3281,9 @@ static Scope *FindLabeledBreakContinueScope(Sema &S, Scope *CurScope,                                              SourceLocation LabelLoc,                                              bool IsContinue) {    assert(Target && "not a named break/continue?"); + +  Target->markUsed(S.Context); +    Scope *Found = nullptr;    for (Scope *Scope = CurScope; Scope; Scope = Scope->getParent()) {      if (Scope->isFunctionScope()) diff --git a/clang/lib/Sema/SemaTemplateDeductionGuide.cpp b/clang/lib/Sema/SemaTemplateDeductionGuide.cpp index ad50600..bfcd397 100644 --- a/clang/lib/Sema/SemaTemplateDeductionGuide.cpp +++ b/clang/lib/Sema/SemaTemplateDeductionGuide.cpp @@ -659,7 +659,8 @@ private:                  SemaRef, MaterializedTypedefs, NestedPattern,                  TransformingOuterPatterns ? &Args : nullptr)                  .transform(NewDI); - +    if (!NewDI) +      return nullptr;      // Resolving a wording defect, we also inherit default arguments from the      // constructor.      ExprResult NewDefArg; diff --git a/clang/lib/Sema/SemaType.cpp b/clang/lib/Sema/SemaType.cpp index 682fd25..c483930 100644 --- a/clang/lib/Sema/SemaType.cpp +++ b/clang/lib/Sema/SemaType.cpp @@ -2399,7 +2399,7 @@ QualType Sema::BuildVectorType(QualType CurType, Expr *SizeExpr,                                 VectorKind::Generic);  } -QualType Sema::BuildExtVectorType(QualType T, Expr *ArraySize, +QualType Sema::BuildExtVectorType(QualType T, Expr *SizeExpr,                                    SourceLocation AttrLoc) {    // Unlike gcc's vector_size attribute, we do not allow vectors to be defined    // in conjunction with complex types (pointers, arrays, functions, etc.). 
@@ -2422,40 +2422,40 @@ QualType Sema::BuildExtVectorType(QualType T, Expr *ArraySize,        BIT && CheckBitIntElementType(*this, AttrLoc, BIT))      return QualType(); -  if (!ArraySize->isTypeDependent() && !ArraySize->isValueDependent()) { -    std::optional<llvm::APSInt> vecSize = -        ArraySize->getIntegerConstantExpr(Context); -    if (!vecSize) { +  if (!SizeExpr->isTypeDependent() && !SizeExpr->isValueDependent()) { +    std::optional<llvm::APSInt> VecSize = +        SizeExpr->getIntegerConstantExpr(Context); +    if (!VecSize) {        Diag(AttrLoc, diag::err_attribute_argument_type) -        << "ext_vector_type" << AANT_ArgumentIntegerConstant -        << ArraySize->getSourceRange(); +          << "ext_vector_type" << AANT_ArgumentIntegerConstant +          << SizeExpr->getSourceRange();        return QualType();      } -    if (vecSize->isNegative()) { -      Diag(ArraySize->getExprLoc(), diag::err_attribute_vec_negative_size); +    if (VecSize->isNegative()) { +      Diag(SizeExpr->getExprLoc(), diag::err_attribute_vec_negative_size);        return QualType();      } -    if (!vecSize->isIntN(32)) { +    if (!VecSize->isIntN(32)) {        Diag(AttrLoc, diag::err_attribute_size_too_large) -          << ArraySize->getSourceRange() << "vector"; +          << SizeExpr->getSourceRange() << "vector";        return QualType();      }      // Unlike gcc's vector_size attribute, the size is specified as the      // number of elements, not the number of bytes. -    unsigned vectorSize = static_cast<unsigned>(vecSize->getZExtValue()); +    unsigned VectorSize = static_cast<unsigned>(VecSize->getZExtValue()); -    if (vectorSize == 0) { +    if (VectorSize == 0) {        Diag(AttrLoc, diag::err_attribute_zero_size) -          << ArraySize->getSourceRange() << "vector"; +          << SizeExpr->getSourceRange() << "vector";        return QualType();      } -    return Context.getExtVectorType(T, vectorSize); +    return Context.getExtVectorType(T, VectorSize);    } -  return Context.getDependentSizedExtVectorType(T, ArraySize, AttrLoc); +  return Context.getDependentSizedExtVectorType(T, SizeExpr, AttrLoc);  }  QualType Sema::BuildMatrixType(QualType ElementTy, Expr *NumRows, Expr *NumCols, diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp index 3ac338e..b1fd151 100644 --- a/clang/lib/Serialization/ASTWriter.cpp +++ b/clang/lib/Serialization/ASTWriter.cpp @@ -4374,8 +4374,7 @@ private:      // parent of parent. We DON'T remove the enum constant from its parent. So      // we don't need to care about merging problems here.      
if (auto *ECD = dyn_cast<EnumConstantDecl>(D); -        ECD && DC.isFileContext() && ECD->getOwningModule() && -        ECD->getTopLevelOwningNamedModule()->isNamedModule()) { +        ECD && DC.isFileContext() && ECD->getTopLevelOwningNamedModule()) {        if (llvm::all_of(                DC.noload_lookup(                    cast<EnumDecl>(ECD->getDeclContext())->getDeclName()), diff --git a/clang/test/CodeGen/embed-bitcode-marker-with-nonzero-as.c b/clang/test/CodeGen/embed-bitcode-marker-with-nonzero-as.c index 8af9708..df71188 100644 --- a/clang/test/CodeGen/embed-bitcode-marker-with-nonzero-as.c +++ b/clang/test/CodeGen/embed-bitcode-marker-with-nonzero-as.c @@ -3,6 +3,6 @@  // CHECK: @llvm.embedded.module = private addrspace(1) constant [0 x i8] zeroinitializer, section ".llvmbc", align 1  // CHECK-NEXT: @llvm.cmdline = private addrspace(1) constant [{{[0-9]+}} x i8] c"{{.*}}", section ".llvmcmd", align 1 -// CHECK-NEXT: @llvm.compiler.used = appending addrspace(1) global [5 x ptr] [ptr addrspacecast (ptr addrspace(1) @foo.managed to ptr), ptr addrspacecast (ptr addrspace(1) @foo to ptr), ptr addrspacecast (ptr addrspace(1) @__hip_cuid_ to ptr), ptr addrspacecast (ptr addrspace(1) @llvm.embedded.module to ptr), ptr addrspacecast (ptr addrspace(1) @llvm.cmdline to ptr)], section "llvm.metadata" +// CHECK-NEXT: @llvm.compiler.used = appending addrspace(1) global [5 x ptr addrspace(4)] [ptr addrspace(4) addrspacecast (ptr addrspace(1) @foo.managed to ptr addrspace(4)), ptr addrspace(4) addrspacecast (ptr addrspace(1) @foo to ptr addrspace(4)), ptr addrspace(4) addrspacecast (ptr addrspace(1) @__hip_cuid_ to ptr addrspace(4)), ptr addrspace(4) addrspacecast (ptr addrspace(1) @llvm.embedded.module to ptr addrspace(4)), ptr addrspace(4) addrspacecast (ptr addrspace(1) @llvm.cmdline to ptr addrspace(4))], section "llvm.metadata"  __attribute__((managed)) int foo = 42; diff --git a/clang/test/CodeGen/llvm_compiler_used_elements_are_unqual.c b/clang/test/CodeGen/llvm_compiler_used_elements_are_unqual.c deleted file mode 100644 index b6550fb..0000000 --- a/clang/test/CodeGen/llvm_compiler_used_elements_are_unqual.c +++ /dev/null @@ -1,64 +0,0 @@ -// RUN: %clang_cc1 -x c -triple x86_64-- -emit-llvm -x c %s -o - \ -// RUN:   | FileCheck %s --check-prefix=X86 -// RUN: %clang_cc1 -x c -triple amdgcn-amd-amdhsa -emit-llvm -x c %s -o - \ -// RUN:   | FileCheck %s --check-prefix=AMDGCN -// RUN: %clang_cc1 -x c -triple spirv64-- -emit-llvm -x c %s -o - \ -// RUN:   | FileCheck %s --check-prefix=SPIRV -// RUN: %clang_cc1 -x c -triple spirv64-amd-amdhsa -emit-llvm -x c %s -o - \ -// RUN:   | FileCheck %s --check-prefix=SPIRV_AMD -// RUN: %clang_cc1 -x cl -cl-std=CL1.2 -triple x86_64-- -emit-llvm -x c %s -o - \ -// RUN:   | FileCheck %s --check-prefix=X86 -// RUN: %clang_cc1 -x cl -cl-std=CL1.2 -triple amdgcn-amd-amdhsa -emit-llvm -x c %s -o - \ -// RUN:   | FileCheck %s --check-prefix=AMDGCN -// RUN: %clang_cc1 -x cl -cl-std=CL1.2 -triple spirv64-- -emit-llvm -x c %s -o - \ -// RUN:   | FileCheck %s --check-prefix=SPIRV_CL -// RUN: %clang_cc1 -x cl -cl-std=CL1.2 -triple spirv64-amd-amdhsa -emit-llvm -x c %s -o - \ -// RUN:   | FileCheck %s --check-prefix=SPIRV_AMD_CL -// RUN: %clang_cc1 -x cl -cl-std=CL2.0 -triple x86_64-- -emit-llvm -x c %s -o - \ -// RUN:   | FileCheck %s --check-prefix=X86 -// RUN: %clang_cc1 -x cl -cl-std=CL2.0 -triple amdgcn-amd-amdhsa -emit-llvm -x c %s -o - \ -// RUN:   | FileCheck %s --check-prefix=AMDGCN -// RUN: %clang_cc1 -x cl -cl-std=CL2.0 -triple spirv64-- -emit-llvm -x 
c %s -o - \ -// RUN:   | FileCheck %s --check-prefix=SPIRV_CL -// RUN: %clang_cc1 -x cl -cl-std=CL2.0 -triple spirv64-amd-amdhsa -emit-llvm -x c %s -o - \ -// RUN:   | FileCheck %s --check-prefix=SPIRV_AMD_CL - -#ifndef __OPENCL_C_VERSION__ -#define __constant const -#endif - -static __constant __attribute__((__used__)) int foo = 42; - - -// X86: @foo = internal constant i32 42 -// X86: @llvm.compiler.used = appending global [2 x ptr] [ptr @foo, ptr @bar], section "llvm.metadata" -// -// AMDGCN: @foo = internal addrspace(4) constant i32 42 -// AMDGCN: @llvm.compiler.used = appending addrspace(1) global [2 x ptr] [ptr addrspacecast (ptr addrspace(4) @foo to ptr), ptr @bar], section "llvm.metadata" -// -// SPIRV: @foo = internal constant i32 42 -// SPIRV: @llvm.used = appending addrspace(1) global [2 x ptr] [ptr @foo, ptr @bar], section "llvm.metadata" -// -// SPIRV_CL: @foo = internal addrspace(2) constant i32 42 -// SPIRV_CL: @llvm.used = appending addrspace(1) global [2 x ptr] [ptr addrspacecast (ptr addrspace(2) @foo to ptr), ptr @bar], section "llvm.metadata" -// -// SPIRV_AMD: @foo = internal addrspace(1) constant i32 42 -// SPIRV_AMD: @llvm.used = appending addrspace(1) global [2 x ptr] [ptr addrspacecast (ptr addrspace(1) @foo to ptr), ptr addrspacecast (ptr addrspace(4) @bar to ptr)], section "llvm.metadata" -// -// SPIRV_AMD_CL: @foo = internal addrspace(2) constant i32 42 -// SPIRV_AMD_CL: @llvm.used = appending addrspace(1) global [2 x ptr] [ptr addrspacecast (ptr addrspace(2) @foo to ptr), ptr addrspacecast (ptr addrspace(4) @bar to ptr)], section "llvm.metadata" -// -// X86: define internal void @bar() #{{[0-9]}} { -// -// AMDGCN: define internal void @bar() #{{[0-9]}} { -// -// SPIRV: define internal spir_func void @bar() #{{[0-9]}} { -// -// SPIRV_CL: define internal spir_func void @bar() #{{[0-9]}} { -// -// SPIRV_AMD: define internal spir_func void @bar() addrspace(4) #{{[0-9]}} { -// -// SPIRV_AMD_CL: define internal spir_func void @bar() addrspace(4) #{{[0-9]}} { -// -static void __attribute__((__used__)) bar() { -} diff --git a/clang/test/Modules/Inputs/builtin-headers/system-modules.modulemap b/clang/test/Modules/Inputs/builtin-headers/system-modules.modulemap index 1869651..8ab6ae47 100644 --- a/clang/test/Modules/Inputs/builtin-headers/system-modules.modulemap +++ b/clang/test/Modules/Inputs/builtin-headers/system-modules.modulemap @@ -49,6 +49,11 @@ module cstd [system] [no_undeclared_includes] {      export *    } +  module stdckdint { +    header "stdckdint.h" +    export * +  } +    module stdcountof {      header "stdcountof.h"      export * diff --git a/clang/test/Modules/builtin-headers.mm b/clang/test/Modules/builtin-headers.mm index ad2d66a..6cd3662 100644 --- a/clang/test/Modules/builtin-headers.mm +++ b/clang/test/Modules/builtin-headers.mm @@ -17,6 +17,7 @@  @import _Builtin_stdarg;  @import _Builtin_stdatomic;  @import _Builtin_stdbool; +@import _Builtin_stdckdint;  @import _Builtin_stdcountof;  @import _Builtin_stddef;  @import _Builtin_stdint; diff --git a/clang/test/Modules/crash-enum-visibility-with-header-unit.cppm b/clang/test/Modules/crash-enum-visibility-with-header-unit.cppm new file mode 100644 index 0000000..90c5779 --- /dev/null +++ b/clang/test/Modules/crash-enum-visibility-with-header-unit.cppm @@ -0,0 +1,46 @@ +// Fixes #165445 + +// RUN: rm -rf %t +// RUN: mkdir -p %t +// RUN: split-file %s %t +// +// RUN: %clang_cc1 -std=c++20 -x c++-user-header %t/header.h \ +// RUN:   -emit-header-unit -o %t/header.pcm +// +// RUN: %clang_cc1 -std=c++20 
%t/A.cppm -fmodule-file=%t/header.pcm \ +// RUN:   -emit-module-interface -o %t/A.pcm +//  +// RUN: %clang_cc1 -std=c++20 %t/B.cppm -fmodule-file=%t/header.pcm \ +// RUN:   -emit-module-interface -o %t/B.pcm +// +// RUN: %clang_cc1 -std=c++20 %t/use.cpp \ +// RUN:   -fmodule-file=A=%t/A.pcm -fmodule-file=B=%t/B.pcm  \ +// RUN:   -fmodule-file=%t/header.pcm \ +// RUN:   -verify -fsyntax-only + +//--- enum.h +enum E { Value }; + +//--- header.h +#include "enum.h" + +//--- A.cppm +module; +#include "enum.h" +export module A; + +auto e = Value; + +//--- B.cppm +export module B; +import "header.h"; + +auto e = Value; + +//--- use.cpp +// expected-no-diagnostics +import A; +import B; +#include "enum.h" + +auto e = Value; diff --git a/clang/test/Sema/attr-nonblocking-constraints.cpp b/clang/test/Sema/attr-nonblocking-constraints.cpp index b26a945..881e816 100644 --- a/clang/test/Sema/attr-nonblocking-constraints.cpp +++ b/clang/test/Sema/attr-nonblocking-constraints.cpp @@ -235,16 +235,35 @@ void nb13() [[clang::nonblocking]] { nb12(); }  // C++ member function pointers  struct PTMFTester {  	typedef void (PTMFTester::*ConvertFunction)() [[clang::nonblocking]]; - -	void convert() [[clang::nonblocking]]; +	typedef void (PTMFTester::*BlockingFunction)();  	ConvertFunction mConvertFunc; -}; -void PTMFTester::convert() [[clang::nonblocking]] -{ -	(this->*mConvertFunc)(); -} +	void convert() [[clang::nonblocking]] +	{ +		(this->*mConvertFunc)(); // This should not generate a warning. +	} + +	template <typename T> +	struct Holder { +		T value; +		 +		T& operator*() { return value; } +	}; + + +	void ptmfInExpr(Holder<ConvertFunction>& holder) [[clang::nonblocking]] +	{ +		(this->*(*holder))();   // Should not generate a warning. +		((*this).*(*holder))(); // Should not generate a warning. +	} + +	void ptmfInExpr(Holder<BlockingFunction>& holder) [[clang::nonblocking]] +	{ +		(this->*(*holder))(); // expected-warning {{function with 'nonblocking' attribute must not call non-'nonblocking' expression}} +		((*this).*(*holder))(); // expected-warning {{function with 'nonblocking' attribute must not call non-'nonblocking' expression}} +	} +};  // Allow implicit conversion from array to pointer.  void nb14(unsigned idx) [[clang::nonblocking]] @@ -354,6 +373,33 @@ struct Unsafe {    Unsafe(float y) [[clang::nonblocking]] : Unsafe(int(y)) {} // expected-warning {{constructor with 'nonblocking' attribute must not call non-'nonblocking' constructor 'Unsafe::Unsafe'}}  }; +// Exercise cases of a temporary with a safe constructor and unsafe destructor. 
+void nb23() +{ +	struct X { +		int *ptr = nullptr; +		X() {} +		~X() { delete ptr; } // expected-note 2 {{destructor cannot be inferred 'nonblocking' because it allocates or deallocates memory}} +	}; + +	auto inner = []() [[clang::nonblocking]] { +		X(); // expected-warning {{lambda with 'nonblocking' attribute must not call non-'nonblocking' destructor 'nb23()::X::~X'}} +	}; + +	auto inner2 = [](X x) [[clang::nonblocking]] { // expected-warning {{lambda with 'nonblocking' attribute must not call non-'nonblocking' destructor 'nb23()::X::~X'}} +	}; + +} + +struct S2 { ~S2(); }; // expected-note 2 {{declaration cannot be inferred 'nonblocking' because it has no definition in this translation unit}} +void nb24() { +    S2 s; +    [&]() [[clang::nonblocking]] { +        [s]{ auto x = &s; }(); // expected-warning {{lambda with 'nonblocking' attribute must not call non-'nonblocking' destructor}} expected-note {{destructor cannot be inferred 'nonblocking' because it calls non-'nonblocking' destructor 'S2::~S2'}} +        [=]{ auto x = &s; }(); // expected-warning {{lambda with 'nonblocking' attribute must not call non-'nonblocking' destructor}} expected-note {{destructor cannot be inferred 'nonblocking' because it calls non-'nonblocking' destructor 'S2::~S2'}} +    }(); +} +  struct DerivedFromUnsafe : public Unsafe {    DerivedFromUnsafe() [[clang::nonblocking]] {} // expected-warning {{constructor with 'nonblocking' attribute must not call non-'nonblocking' constructor 'Unsafe::Unsafe'}}    DerivedFromUnsafe(int x) [[clang::nonblocking]] : Unsafe(x) {} // expected-warning {{constructor with 'nonblocking' attribute must not call non-'nonblocking' constructor 'Unsafe::Unsafe'}} diff --git a/clang/test/Sema/labeled-break-continue.c b/clang/test/Sema/labeled-break-continue.c index 78f81c4..6b4adc2 100644 --- a/clang/test/Sema/labeled-break-continue.c +++ b/clang/test/Sema/labeled-break-continue.c @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -std=c2y -verify -fsyntax-only -fblocks %s -// RUN: %clang_cc1 -std=c23 -verify -fsyntax-only -fblocks -fnamed-loops %s -// RUN: %clang_cc1 -x c++ -verify -fsyntax-only -fblocks -fnamed-loops %s +// RUN: %clang_cc1 -std=c2y -verify -Wunused -fsyntax-only -fblocks %s +// RUN: %clang_cc1 -std=c23 -verify -Wunused -fsyntax-only -fblocks -fnamed-loops %s +// RUN: %clang_cc1 -x c++ -verify -Wunused -fsyntax-only -fblocks -fnamed-loops %s  void f1() {    l1: while (true) { @@ -159,3 +159,15 @@ void f7() {      continue d; // expected-error {{'continue' label does not name an enclosing loop}}    }  } + +void f8() { +  l1: // no-warning +  while (true) { +    break l1; +  } + +  l2: // no-warning +  while (true) { +    continue l2; +  } +} diff --git a/clang/test/SemaTemplate/ctad.cpp b/clang/test/SemaTemplate/ctad.cpp index 1a575ea..60603f0 100644 --- a/clang/test/SemaTemplate/ctad.cpp +++ b/clang/test/SemaTemplate/ctad.cpp @@ -104,3 +104,15 @@ namespace ConvertDeducedTemplateArgument {    auto x = C(D<A::B>());  } + +namespace pr165560 { +template <class T, class> struct S { +  using A = T; +  template <class> struct I { // expected-note{{candidate function template not viable: requires 1 argument, but 0 were provided}} \ +                              // expected-note{{implicit deduction guide declared as 'template <class> I(pr165560::S<int, int>::I<type-parameter-0-0>) -> pr165560::S<int, int>::I<type-parameter-0-0>'}} +    I(typename A::F) {} // expected-error{{type 'A' (aka 'int') cannot be used prior to '::' because it has no members}} +  }; +}; +S<int, int>::I i; // 
expected-error{{no viable constructor or deduction guide for deduction of template arguments of 'S<int, int>::I'}} \ +                  // expected-note{{while building implicit deduction guide first needed here}} +} diff --git a/clang/unittests/Format/FormatTest.cpp b/clang/unittests/Format/FormatTest.cpp index ca9e792..24235b9 100644 --- a/clang/unittests/Format/FormatTest.cpp +++ b/clang/unittests/Format/FormatTest.cpp @@ -20824,6 +20824,13 @@ TEST_F(FormatTest, AlignWithLineBreaks) {                 "    argument1,\n"                 "    argument2);",                 Style); + +  Style.ColumnLimit = 45; +  verifyFormat("auto xxxxxxxx = foo;\n" +               "auto x = whatever ? some / long -\n" +               "                        computition / stuff\n" +               "                  : random;", +               Style);  }  TEST_F(FormatTest, AlignWithInitializerPeriods) { diff --git a/compiler-rt/lib/builtins/cpu_model/x86.c b/compiler-rt/lib/builtins/cpu_model/x86.c index c21b2ba..45b7055 100644 --- a/compiler-rt/lib/builtins/cpu_model/x86.c +++ b/compiler-rt/lib/builtins/cpu_model/x86.c @@ -21,7 +21,9 @@  #if defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER) +#if __STDC_HOSTED__  #include <assert.h> +#endif // __STDC_HOSTED__  #if (defined(__GNUC__) || defined(__clang__)) && !defined(_MSC_VER)  #include <cpuid.h> @@ -245,8 +247,8 @@ struct __processor_model {    unsigned int __cpu_features[1];  } __cpu_model = {0, 0, 0, {0}}; -static_assert(sizeof(__cpu_model) == 16, -              "Wrong size of __cpu_model will result in ABI break"); +_Static_assert(sizeof(__cpu_model) == 16, +               "Wrong size of __cpu_model will result in ABI break");  // This code is copied from lib/Support/Host.cpp.  // Changes to either file should be mirrored in the other. @@ -1200,8 +1202,8 @@ int CONSTRUCTOR_ATTRIBUTE __cpu_indicator_init(void) {    unsigned Vendor;    unsigned Model, Family;    unsigned Features[(CPU_FEATURE_MAX + 31) / 32] = {0}; -  static_assert(sizeof(Features) / sizeof(Features[0]) == 4, ""); -  static_assert(sizeof(__cpu_features2) / sizeof(__cpu_features2[0]) == 3, ""); +  _Static_assert(sizeof(Features) / sizeof(Features[0]) == 4, ""); +  _Static_assert(sizeof(__cpu_features2) / sizeof(__cpu_features2[0]) == 3, "");    // This function needs to run just once.    
if (__cpu_model.__cpu_vendor) @@ -1234,9 +1236,11 @@ int CONSTRUCTOR_ATTRIBUTE __cpu_indicator_init(void) {    } else      __cpu_model.__cpu_vendor = VENDOR_OTHER; +#if __STDC_HOSTED__    assert(__cpu_model.__cpu_vendor < VENDOR_MAX);    assert(__cpu_model.__cpu_type < CPU_TYPE_MAX);    assert(__cpu_model.__cpu_subtype < CPU_SUBTYPE_MAX); +#endif // __STDC_HOSTED__    return 0;  } diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp index b0a29db..90c0b66 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp @@ -960,7 +960,17 @@ static void DisableMmapExcGuardExceptions() {        RTLD_DEFAULT, "task_set_exc_guard_behavior");    if (set_behavior == nullptr) return;    const task_exc_guard_behavior_t task_exc_guard_none = 0; -  set_behavior(mach_task_self(), task_exc_guard_none); +  kern_return_t res = set_behavior(mach_task_self(), task_exc_guard_none); +  if (res != KERN_SUCCESS) { +    Report( +        "WARN: task_set_exc_guard_behavior returned %d (%s), " +        "mmap may fail unexpectedly.\n", +        res, mach_error_string(res)); +    if (res == KERN_DENIED) +      Report( +          "HINT: Check that task_set_exc_guard_behavior is allowed by " +          "sandbox.\n"); +  }  }  static void VerifyInterceptorsWorking(); diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp index f8d821e..7eb0c97 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp @@ -505,6 +505,13 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,    }  #  if SANITIZER_APPLE +  if (list->empty()) { +    Report( +        "WARN: No external symbolizers found. Symbols may be missing or " +        "unreliable.\n"); +    Report( +        "HINT: Is PATH set? Does sandbox allow file-read of /usr/bin/atos?\n"); +  }    VReport(2, "Using dladdr symbolizer.\n");    list->push_back(new (*allocator) DlAddrSymbolizer());  #  endif  // SANITIZER_APPLE diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.def b/compiler-rt/lib/scudo/standalone/allocator_config.def index 7485308..0aea7b8 100644 --- a/compiler-rt/lib/scudo/standalone/allocator_config.def +++ b/compiler-rt/lib/scudo/standalone/allocator_config.def @@ -57,6 +57,10 @@ BASE_OPTIONAL(const bool, MaySupportMemoryTagging, false)  // Disable the quarantine code.  BASE_OPTIONAL(const bool, QuarantineDisabled, false) +// If set to true, malloc_usable_size returns the exact size of the allocation. +// If set to false, return the total available size in the allocation. +BASE_OPTIONAL(const bool, ExactUsableSize, true) +  // PRIMARY_REQUIRED_TYPE(NAME)  //  // SizeClassMap to use with the Primary. 
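The ExactUsableSize comment above describes the new switch added to allocator_config.def. As a minimal caller-side sketch (not part of this patch; it assumes a Linux-style malloc_usable_size exported by a Scudo build using such a config, and the numbers in the comments are illustrative only), the two settings differ like this:

#include <cstdio>
#include <cstdlib>
#include <malloc.h>  // malloc_usable_size (glibc/Bionic); availability is platform-specific

int main() {
  void *p = std::malloc(100);
  size_t usable = malloc_usable_size(p);
  // With ExactUsableSize = true (the default in this patch), `usable` is the
  // exact requested size, here 100.
  // With ExactUsableSize = false, `usable` may be larger: roughly the rest of
  // the size-class block (or, for secondary allocations, the page-rounded
  // block end) minus the chunk header and alignment offset, and those extra
  // bytes are writable.
  std::printf("requested=100 usable=%zu\n", usable);
  std::free(p);
  return 0;
}

The combined.h and test changes later in this diff implement and exercise exactly that distinction (getUsableSize versus getAllocSize).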
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h index 329ec45..ffe9554 100644 --- a/compiler-rt/lib/scudo/standalone/combined.h +++ b/compiler-rt/lib/scudo/standalone/combined.h @@ -706,19 +706,26 @@ public:          if (!getChunkFromBlock(Block, &Chunk, &Header) &&              !getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))            return; -      } else { -        if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header)) -          return; +      } else if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header)) { +        return;        } -      if (Header.State == Chunk::State::Allocated) { -        uptr TaggedChunk = Chunk; -        if (allocatorSupportsMemoryTagging<AllocatorConfig>()) -          TaggedChunk = untagPointer(TaggedChunk); -        if (useMemoryTagging<AllocatorConfig>(Primary.Options.load())) -          TaggedChunk = loadTag(Chunk); -        Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header), -                 Arg); + +      if (Header.State != Chunk::State::Allocated) +        return; + +      uptr TaggedChunk = Chunk; +      if (allocatorSupportsMemoryTagging<AllocatorConfig>()) +        TaggedChunk = untagPointer(TaggedChunk); +      uptr Size; +      if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Primary.Options.load()))) { +        TaggedChunk = loadTag(Chunk); +        Size = getSize(reinterpret_cast<void *>(Chunk), &Header); +      } else if (AllocatorConfig::getExactUsableSize()) { +        Size = getSize(reinterpret_cast<void *>(Chunk), &Header); +      } else { +        Size = getUsableSize(reinterpret_cast<void *>(Chunk), &Header);        } +      Callback(TaggedChunk, Size, Arg);      };      Primary.iterateOverBlocks(Lambda);      Secondary.iterateOverBlocks(Lambda); @@ -759,16 +766,50 @@ public:      return false;    } -  // Return the usable size for a given chunk. Technically we lie, as we just -  // report the actual size of a chunk. This is done to counteract code actively -  // writing past the end of a chunk (like sqlite3) when the usable size allows -  // for it, which then forces realloc to copy the usable size of a chunk as -  // opposed to its actual size. +  ALWAYS_INLINE uptr getUsableSize(const void *Ptr, +                                   Chunk::UnpackedHeader *Header) { +    void *BlockBegin = getBlockBegin(Ptr, Header); +    if (LIKELY(Header->ClassId)) { +      return SizeClassMap::getSizeByClassId(Header->ClassId) - +             (reinterpret_cast<uptr>(Ptr) - reinterpret_cast<uptr>(BlockBegin)); +    } + +    uptr UntaggedPtr = reinterpret_cast<uptr>(Ptr); +    if (allocatorSupportsMemoryTagging<AllocatorConfig>()) { +      UntaggedPtr = untagPointer(UntaggedPtr); +      BlockBegin = untagPointer(BlockBegin); +    } +    return SecondaryT::getBlockEnd(BlockBegin) - UntaggedPtr; +  } + +  // Return the usable size for a given chunk. If MTE is enabled or if the +  // ExactUsableSize config parameter is true, we report the exact size of +  // the original allocation size. Otherwise, we will return the total +  // actual usable size.    
uptr getUsableSize(const void *Ptr) {      if (UNLIKELY(!Ptr))        return 0; -    return getAllocSize(Ptr); +    if (AllocatorConfig::getExactUsableSize() || +        UNLIKELY(useMemoryTagging<AllocatorConfig>(Primary.Options.load()))) +      return getAllocSize(Ptr); + +    initThreadMaybe(); + +#ifdef GWP_ASAN_HOOKS +    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) +      return GuardedAlloc.getSize(Ptr); +#endif // GWP_ASAN_HOOKS + +    Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr)); +    Chunk::UnpackedHeader Header; +    Chunk::loadHeader(Cookie, Ptr, &Header); + +    // Getting the alloc size of a chunk only makes sense if it's allocated. +    if (UNLIKELY(Header.State != Chunk::State::Allocated)) +      reportInvalidChunkState(AllocatorAction::Sizing, Ptr); + +    return getUsableSize(Ptr, &Header);    }    uptr getAllocSize(const void *Ptr) { @@ -951,6 +992,19 @@ public:                           MemorySize, 2, 16);    } +  uptr getBlockBeginTestOnly(const void *Ptr) { +    Chunk::UnpackedHeader Header; +    Chunk::loadHeader(Cookie, Ptr, &Header); +    DCHECK(Header.State == Chunk::State::Allocated); + +    if (allocatorSupportsMemoryTagging<AllocatorConfig>()) +      Ptr = untagPointer(const_cast<void *>(Ptr)); +    void *Begin = getBlockBegin(Ptr, &Header); +    if (allocatorSupportsMemoryTagging<AllocatorConfig>()) +      Begin = untagPointer(Begin); +    return reinterpret_cast<uptr>(Begin); +  } +  private:    typedef typename PrimaryT::SizeClassMap SizeClassMap; diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp index 5fdfd1e..4837ac9 100644 --- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp +++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp @@ -1152,6 +1152,248 @@ TEST(ScudoCombinedTest, QuarantineDisabled) {    EXPECT_EQ(Stats.find("Stats: Quarantine"), std::string::npos);  } +struct UsableSizeClassConfig { +  static const scudo::uptr NumBits = 1; +  static const scudo::uptr MinSizeLog = 10; +  static const scudo::uptr MidSizeLog = 10; +  static const scudo::uptr MaxSizeLog = 13; +  static const scudo::u16 MaxNumCachedHint = 8; +  static const scudo::uptr MaxBytesCachedLog = 12; +  static const scudo::uptr SizeDelta = 0; +}; + +struct TestExactUsableSizeConfig { +  static const bool MaySupportMemoryTagging = false; +  static const bool QuarantineDisabled = true; + +  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>; + +  struct Primary { +    // In order to properly test the usable size, this Primary config has +    // four real size classes: 1024, 2048, 4096, 8192. 
+    using SizeClassMap = scudo::FixedSizeClassMap<UsableSizeClassConfig>; +    static const scudo::uptr RegionSizeLog = 21U; +    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN; +    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX; +    typedef scudo::uptr CompactPtrT; +    static const scudo::uptr CompactPtrScale = 0; +    static const bool EnableRandomOffset = true; +    static const scudo::uptr MapSizeIncrement = 1UL << 18; +    static const scudo::uptr GroupSizeLog = 18; +  }; +  template <typename Config> +  using PrimaryT = scudo::SizeClassAllocator64<Config>; + +  struct Secondary { +    template <typename Config> +    using CacheT = scudo::MapAllocatorNoCache<Config>; +  }; + +  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>; +}; + +template <class AllocatorT> void VerifyExactUsableSize(AllocatorT &Allocator) { +  // Scan through all sizes up to 10000 then some larger sizes. +  for (scudo::uptr Size = 1; Size < 10000; Size++) { +    void *P = Allocator.allocate(Size, Origin); +    EXPECT_EQ(Size, Allocator.getUsableSize(P)) +        << "Failed usable size at allocation size " << Size; +    Allocator.deallocate(P, Origin); +  } + +  // Verify that aligned allocations also return the exact size allocated. +  const scudo::uptr AllocSize = 313; +  for (scudo::uptr Align = 1; Align <= 8; Align++) { +    void *P = Allocator.allocate(AllocSize, Origin, 1U << Align); +    EXPECT_EQ(AllocSize, Allocator.getUsableSize(P)) +        << "Failed usable size at allocation size " << AllocSize << " at align " +        << 1 << Align; +    Allocator.deallocate(P, Origin); +  } + +  // Verify an explicitly large allocations. +  const scudo::uptr LargeAllocSize = 1000000; +  void *P = Allocator.allocate(LargeAllocSize, Origin); +  EXPECT_EQ(LargeAllocSize, Allocator.getUsableSize(P)); +  Allocator.deallocate(P, Origin); + +  // Now do it for aligned allocations for large allocations. +  for (scudo::uptr Align = 1; Align <= 8; Align++) { +    void *P = Allocator.allocate(LargeAllocSize, Origin, 1U << Align); +    EXPECT_EQ(LargeAllocSize, Allocator.getUsableSize(P)) +        << "Failed usable size at allocation size " << AllocSize << " at align " +        << 1 << Align; +    Allocator.deallocate(P, Origin); +  } +} + +template <class AllocatorT> +void VerifyIterateOverUsableSize(AllocatorT &Allocator) { +  // This will not verify if the size is the exact size or the size of the +  // size class. Instead verify that the size matches the usable size and +  // assume the other tests have verified getUsableSize. 
+  std::unordered_map<void *, size_t> Pointers; +  Pointers.insert({Allocator.allocate(128, Origin), 0U}); +  Pointers.insert({Allocator.allocate(128, Origin, 32), 0U}); +  Pointers.insert({Allocator.allocate(2000, Origin), 0U}); +  Pointers.insert({Allocator.allocate(2000, Origin, 64), 0U}); +  Pointers.insert({Allocator.allocate(8000, Origin), 0U}); +  Pointers.insert({Allocator.allocate(8000, Origin, 128), 0U}); +  Pointers.insert({Allocator.allocate(2000205, Origin), 0U}); +  Pointers.insert({Allocator.allocate(2000205, Origin, 128), 0U}); +  Pointers.insert({Allocator.allocate(2000205, Origin, 256), 0U}); + +  Allocator.disable(); +  Allocator.iterateOverChunks( +      0, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1), +      [](uintptr_t Base, size_t Size, void *Arg) { +        std::unordered_map<void *, size_t> *Pointers = +            reinterpret_cast<std::unordered_map<void *, size_t> *>(Arg); +        (*Pointers)[reinterpret_cast<void *>(Base)] = Size; +      }, +      reinterpret_cast<void *>(&Pointers)); +  Allocator.enable(); + +  for (auto [Ptr, IterateSize] : Pointers) { +    EXPECT_NE(0U, IterateSize) +        << "Pointer " << Ptr << " not found in iterateOverChunks call."; +    EXPECT_EQ(IterateSize, Allocator.getUsableSize(Ptr)) +        << "Pointer " << Ptr +        << " mismatch between iterate size and usable size."; +    Allocator.deallocate(Ptr, Origin); +  } +} + +TEST(ScudoCombinedTest, ExactUsableSize) { +  using AllocatorT = scudo::Allocator<TestExactUsableSizeConfig>; +  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT()); + +  VerifyExactUsableSize<AllocatorT>(*Allocator); +  VerifyIterateOverUsableSize<AllocatorT>(*Allocator); +} + +struct TestExactUsableSizeMTEConfig : TestExactUsableSizeConfig { +  static const bool MaySupportMemoryTagging = true; +}; + +TEST(ScudoCombinedTest, ExactUsableSizeMTE) { +  if (!scudo::archSupportsMemoryTagging() || +      !scudo::systemDetectsMemoryTagFaultsTestOnly()) +    TEST_SKIP("Only supported on systems that can enable MTE."); + +  scudo::enableSystemMemoryTaggingTestOnly(); + +  using AllocatorT = scudo::Allocator<TestExactUsableSizeMTEConfig>; +  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT()); + +  VerifyExactUsableSize<AllocatorT>(*Allocator); +  VerifyIterateOverUsableSize<AllocatorT>(*Allocator); +} + +template <class AllocatorT> +void VerifyUsableSizePrimary(AllocatorT &Allocator) { +  std::vector<scudo::uptr> SizeClasses = {1024U, 2048U, 4096U, 8192U}; +  for (size_t I = 0; I < SizeClasses.size(); I++) { +    scudo::uptr SizeClass = SizeClasses[I]; +    scudo::uptr StartSize; +    if (I == 0) +      StartSize = 1; +    else +      StartSize = SizeClasses[I - 1]; +    scudo::uptr UsableSize = SizeClass - scudo::Chunk::getHeaderSize(); +    for (scudo::uptr Size = StartSize; Size < UsableSize; Size++) { +      void *P = Allocator.allocate(Size, Origin); +      EXPECT_EQ(UsableSize, Allocator.getUsableSize(P)) +          << "Failed usable size at allocation size " << Size +          << " for size class " << SizeClass; +      memset(P, 0xff, UsableSize); +      EXPECT_EQ(Allocator.getBlockBeginTestOnly(P) + SizeClass, +                reinterpret_cast<scudo::uptr>(P) + UsableSize); +      Allocator.deallocate(P, Origin); +    } + +    StartSize = UsableSize + 1; +  } + +  std::vector<scudo::uptr> Alignments = {32U, 128U}; +  for (size_t I = 0; I < SizeClasses.size(); I++) { +    scudo::uptr SizeClass = SizeClasses[I]; +    scudo::uptr AllocSize; +    if (I == 0) +      AllocSize = 1; +   
 else +      AllocSize = SizeClasses[I - 1] + 1; + +    for (auto Alignment : Alignments) { +      void *P = Allocator.allocate(AllocSize, Origin, Alignment); +      scudo::uptr UsableSize = Allocator.getUsableSize(P); +      memset(P, 0xff, UsableSize); +      EXPECT_EQ(Allocator.getBlockBeginTestOnly(P) + SizeClass, +                reinterpret_cast<scudo::uptr>(P) + UsableSize) +          << "Failed usable size at allocation size " << AllocSize +          << " for size class " << SizeClass << " at alignment " << Alignment; +      Allocator.deallocate(P, Origin); +    } +  } +} + +template <class AllocatorT> +void VerifyUsableSizeSecondary(AllocatorT &Allocator) { +  const scudo::uptr LargeAllocSize = 996780; +  const scudo::uptr PageSize = scudo::getPageSizeCached(); +  void *P = Allocator.allocate(LargeAllocSize, Origin); +  scudo::uptr UsableSize = Allocator.getUsableSize(P); +  memset(P, 0xff, UsableSize); +  // Assumes that the secondary always rounds up allocations to a page boundary. +  EXPECT_EQ(scudo::roundUp(reinterpret_cast<scudo::uptr>(P) + LargeAllocSize, +                           PageSize), +            reinterpret_cast<scudo::uptr>(P) + UsableSize); +  Allocator.deallocate(P, Origin); + +  // Check aligned allocations now. +  for (scudo::uptr Alignment = 1; Alignment <= 8; Alignment++) { +    void *P = Allocator.allocate(LargeAllocSize, Origin, 1U << Alignment); +    scudo::uptr UsableSize = Allocator.getUsableSize(P); +    EXPECT_EQ(scudo::roundUp(reinterpret_cast<scudo::uptr>(P) + LargeAllocSize, +                             PageSize), +              reinterpret_cast<scudo::uptr>(P) + UsableSize) +        << "Failed usable size at allocation size " << LargeAllocSize +        << " at alignment " << Alignment; +    Allocator.deallocate(P, Origin); +  } +} + +struct TestFullUsableSizeConfig : TestExactUsableSizeConfig { +  static const bool ExactUsableSize = false; +}; + +TEST(ScudoCombinedTest, FullUsableSize) { +  using AllocatorT = scudo::Allocator<TestFullUsableSizeConfig>; +  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT()); + +  VerifyUsableSizePrimary<AllocatorT>(*Allocator); +  VerifyUsableSizeSecondary<AllocatorT>(*Allocator); +  VerifyIterateOverUsableSize<AllocatorT>(*Allocator); +} + +struct TestFullUsableSizeMTEConfig : TestFullUsableSizeConfig { +  static const bool MaySupportMemoryTagging = true; +}; + +TEST(ScudoCombinedTest, FullUsableSizeMTE) { +  if (!scudo::archSupportsMemoryTagging() || +      !scudo::systemDetectsMemoryTagFaultsTestOnly()) +    TEST_SKIP("Only supported on systems that can enable MTE."); + +  scudo::enableSystemMemoryTaggingTestOnly(); + +  using AllocatorT = scudo::Allocator<TestFullUsableSizeMTEConfig>; +  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT()); + +  // When MTE is enabled, you get exact sizes. +  VerifyExactUsableSize<AllocatorT>(*Allocator); +  VerifyIterateOverUsableSize<AllocatorT>(*Allocator); +}  // Verify that no special quarantine blocks appear in iterateOverChunks.  
TEST(ScudoCombinedTest, QuarantineIterateOverChunks) {    using AllocatorT = TestAllocator<TestQuarantineConfig>; diff --git a/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp b/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp index 612317b..9e5d065 100644 --- a/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp +++ b/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp @@ -588,8 +588,13 @@ TEST_F(ScudoWrappersCTest, MallocInfo) {    EXPECT_EQ(errno, 0);    fclose(F);    EXPECT_EQ(strncmp(Buffer, "<malloc version=\"scudo-", 23), 0); -  EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"1234\" count=\"")); -  EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"4321\" count=\"")); +  std::string expected; +  expected = +      "<alloc size=\"" + std::to_string(malloc_usable_size(P1)) + "\" count=\""; +  EXPECT_NE(nullptr, strstr(Buffer, expected.c_str())); +  expected = +      "<alloc size=\"" + std::to_string(malloc_usable_size(P2)) + "\" count=\""; +  EXPECT_NE(nullptr, strstr(Buffer, expected.c_str()));    free(P1);    free(P2); diff --git a/flang/include/flang/Optimizer/Builder/IntrinsicCall.h b/flang/include/flang/Optimizer/Builder/IntrinsicCall.h index 3407dd0..bbdef48 100644 --- a/flang/include/flang/Optimizer/Builder/IntrinsicCall.h +++ b/flang/include/flang/Optimizer/Builder/IntrinsicCall.h @@ -188,6 +188,10 @@ struct IntrinsicLibrary {    fir::ExtendedValue genAny(mlir::Type, llvm::ArrayRef<fir::ExtendedValue>);    mlir::Value genAtanpi(mlir::Type, llvm::ArrayRef<mlir::Value>);    mlir::Value genAtomicAdd(mlir::Type, llvm::ArrayRef<mlir::Value>); +  fir::ExtendedValue genAtomicAddR2(mlir::Type, +                                    llvm::ArrayRef<fir::ExtendedValue>); +  fir::ExtendedValue genAtomicAddVector(mlir::Type, +                                        llvm::ArrayRef<fir::ExtendedValue>);    mlir::Value genAtomicAnd(mlir::Type, llvm::ArrayRef<mlir::Value>);    fir::ExtendedValue genAtomicCas(mlir::Type,                                    llvm::ArrayRef<fir::ExtendedValue>); diff --git a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp index 15ea845..b9ea8b1 100644 --- a/flang/lib/Optimizer/Builder/IntrinsicCall.cpp +++ b/flang/lib/Optimizer/Builder/IntrinsicCall.cpp @@ -290,10 +290,22 @@ static constexpr IntrinsicHandler handlers[]{      {"atan2pi", &I::genAtanpi},      {"atand", &I::genAtand},      {"atanpi", &I::genAtanpi}, +    {"atomicadd_r2x2", +     &I::genAtomicAddVector, +     {{{"a", asAddr}, {"v", asAddr}}}, +     false}, +    {"atomicadd_r4x2", +     &I::genAtomicAddVector, +     {{{"a", asAddr}, {"v", asAddr}}}, +     false},      {"atomicaddd", &I::genAtomicAdd, {{{"a", asAddr}, {"v", asValue}}}, false},      {"atomicaddf", &I::genAtomicAdd, {{{"a", asAddr}, {"v", asValue}}}, false},      {"atomicaddi", &I::genAtomicAdd, {{{"a", asAddr}, {"v", asValue}}}, false},      {"atomicaddl", &I::genAtomicAdd, {{{"a", asAddr}, {"v", asValue}}}, false}, +    {"atomicaddr2", +     &I::genAtomicAddR2, +     {{{"a", asAddr}, {"v", asAddr}}}, +     false},      {"atomicandi", &I::genAtomicAnd, {{{"a", asAddr}, {"v", asValue}}}, false},      {"atomiccasd",       &I::genAtomicCas, @@ -3119,7 +3131,6 @@ static mlir::Value genAtomBinOp(fir::FirOpBuilder &builder, mlir::Location &loc,  mlir::Value IntrinsicLibrary::genAtomicAdd(mlir::Type resultType,                                             llvm::ArrayRef<mlir::Value> args) {    assert(args.size() == 2); -    mlir::LLVM::AtomicBinOp binOp =        
mlir::isa<mlir::IntegerType>(args[1].getType())            ? mlir::LLVM::AtomicBinOp::add @@ -3127,6 +3138,85 @@ mlir::Value IntrinsicLibrary::genAtomicAdd(mlir::Type resultType,    return genAtomBinOp(builder, loc, binOp, args[0], args[1]);  } +fir::ExtendedValue +IntrinsicLibrary::genAtomicAddR2(mlir::Type resultType, +                                 llvm::ArrayRef<fir::ExtendedValue> args) { +  assert(args.size() == 2); + +  mlir::Value a = fir::getBase(args[0]); + +  if (mlir::isa<fir::BaseBoxType>(a.getType())) { +    a = fir::BoxAddrOp::create(builder, loc, a); +  } + +  auto loc = builder.getUnknownLoc(); +  auto f16Ty = builder.getF16Type(); +  auto i32Ty = builder.getI32Type(); +  auto vecF16Ty = mlir::VectorType::get({2}, f16Ty); +  mlir::Type idxTy = builder.getIndexType(); +  auto f16RefTy = fir::ReferenceType::get(f16Ty); +  auto zero = builder.createIntegerConstant(loc, idxTy, 0); +  auto one = builder.createIntegerConstant(loc, idxTy, 1); +  auto v1Coord = fir::CoordinateOp::create(builder, loc, f16RefTy, +                                           fir::getBase(args[1]), zero); +  auto v2Coord = fir::CoordinateOp::create(builder, loc, f16RefTy, +                                           fir::getBase(args[1]), one); +  auto v1 = fir::LoadOp::create(builder, loc, v1Coord); +  auto v2 = fir::LoadOp::create(builder, loc, v2Coord); +  mlir::Value undef = mlir::LLVM::UndefOp::create(builder, loc, vecF16Ty); +  mlir::Value vec1 = mlir::LLVM::InsertElementOp::create( +      builder, loc, undef, v1, builder.createIntegerConstant(loc, i32Ty, 0)); +  mlir::Value vec2 = mlir::LLVM::InsertElementOp::create( +      builder, loc, vec1, v2, builder.createIntegerConstant(loc, i32Ty, 1)); +  auto res = genAtomBinOp(builder, loc, mlir::LLVM::AtomicBinOp::fadd, a, vec2); +  auto i32VecTy = mlir::VectorType::get({1}, i32Ty); +  mlir::Value vecI32 = +      mlir::vector::BitCastOp::create(builder, loc, i32VecTy, res); +  return mlir::vector::ExtractOp::create(builder, loc, vecI32, +                                         mlir::ArrayRef<int64_t>{0}); +} + +fir::ExtendedValue +IntrinsicLibrary::genAtomicAddVector(mlir::Type resultType, +                                     llvm::ArrayRef<fir::ExtendedValue> args) { +  assert(args.size() == 2); +  mlir::Value res = fir::AllocaOp::create( +      builder, loc, fir::SequenceType::get({2}, resultType)); +  mlir::Value a = fir::getBase(args[0]); +  if (mlir::isa<fir::BaseBoxType>(a.getType())) { +    a = fir::BoxAddrOp::create(builder, loc, a); +  } +  auto vecTy = mlir::VectorType::get({2}, resultType); +  auto refTy = fir::ReferenceType::get(resultType); +  mlir::Type i32Ty = builder.getI32Type(); +  mlir::Type idxTy = builder.getIndexType(); +  mlir::Value zero = builder.createIntegerConstant(loc, idxTy, 0); +  mlir::Value one = builder.createIntegerConstant(loc, idxTy, 1); +  mlir::Value v1Coord = fir::CoordinateOp::create(builder, loc, refTy, +                                                  fir::getBase(args[1]), zero); +  mlir::Value v2Coord = fir::CoordinateOp::create(builder, loc, refTy, +                                                  fir::getBase(args[1]), one); +  mlir::Value v1 = fir::LoadOp::create(builder, loc, v1Coord); +  mlir::Value v2 = fir::LoadOp::create(builder, loc, v2Coord); +  mlir::Value undef = mlir::LLVM::UndefOp::create(builder, loc, vecTy); +  mlir::Value vec1 = mlir::LLVM::InsertElementOp::create( +      builder, loc, undef, v1, builder.createIntegerConstant(loc, i32Ty, 0)); +  mlir::Value vec2 = 
mlir::LLVM::InsertElementOp::create( +      builder, loc, vec1, v2, builder.createIntegerConstant(loc, i32Ty, 1)); +  mlir::Value add = +      genAtomBinOp(builder, loc, mlir::LLVM::AtomicBinOp::fadd, a, vec2); +  mlir::Value r1 = mlir::LLVM::ExtractElementOp::create( +      builder, loc, add, builder.createIntegerConstant(loc, i32Ty, 0)); +  mlir::Value r2 = mlir::LLVM::ExtractElementOp::create( +      builder, loc, add, builder.createIntegerConstant(loc, i32Ty, 1)); +  mlir::Value c1 = fir::CoordinateOp::create(builder, loc, refTy, res, zero); +  mlir::Value c2 = fir::CoordinateOp::create(builder, loc, refTy, res, one); +  fir::StoreOp::create(builder, loc, r1, c1); +  fir::StoreOp::create(builder, loc, r2, c2); +  mlir::Value ext = builder.createIntegerConstant(loc, idxTy, 2); +  return fir::ArrayBoxValue(res, {ext}); +} +  mlir::Value IntrinsicLibrary::genAtomicSub(mlir::Type resultType,                                             llvm::ArrayRef<mlir::Value> args) {    assert(args.size() == 2); @@ -3345,13 +3435,12 @@ IntrinsicLibrary::genBarrierArriveCnt(mlir::Type resultType,    assert(args.size() == 2);    mlir::Value barrier = convertPtrToNVVMSpace(        builder, loc, args[0], mlir::NVVM::NVVMMemorySpace::Shared); -  mlir::Value token = fir::AllocaOp::create(builder, loc, resultType); -  // TODO: the MBarrierArriveExpectTxOp is not taking the state argument and -  // currently just the sink symbol `_`. -  // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#parallel-synchronization-and-communication-instructions-mbarrier-arrive -  mlir::NVVM::MBarrierArriveExpectTxOp::create(builder, loc, barrier, args[1], -                                               {}); -  return fir::LoadOp::create(builder, loc, token); +  return mlir::NVVM::InlinePtxOp::create(builder, loc, {resultType}, +                                         {barrier, args[1]}, {}, +                                         "mbarrier.arrive.expect_tx.release." 
+                                         "cta.shared::cta.b64 %0, [%1], %2;", +                                         {}) +      .getResult(0);  }  // BARRIER_INIT (CUDA) @@ -3392,13 +3481,15 @@ IntrinsicLibrary::genBarrierTryWait(mlir::Type resultType,    builder.setInsertionPointToStart(afterBlock);    auto llvmPtrTy = mlir::LLVM::LLVMPointerType::get(builder.getContext());    auto barrier = builder.createConvert(loc, llvmPtrTy, args[0]); -  mlir::Value ret = -      mlir::NVVM::InlinePtxOp::create( -          builder, loc, {resultType}, {barrier, args[1], ns}, {}, -          ".reg .pred p; mbarrier.try_wait.shared.b64 p, [%1], %2, %3; " -          "selp.b32 %0, 1, 0, p;", -          {}) -          .getResult(0); +  mlir::Value ret = mlir::NVVM::InlinePtxOp::create( +                        builder, loc, {resultType}, {barrier, args[1], ns}, {}, +                        "{\n" +                        "  .reg .pred p;\n" +                        "  mbarrier.try_wait.shared.b64 p, [%1], %2, %3;\n" +                        "  selp.b32 %0, 1, 0, p;\n" +                        "}", +                        {}) +                        .getResult(0);    mlir::scf::YieldOp::create(builder, loc, ret);    builder.setInsertionPointAfter(whileOp);    return whileOp.getResult(0); @@ -3413,8 +3504,11 @@ IntrinsicLibrary::genBarrierTryWaitSleep(mlir::Type resultType,    auto barrier = builder.createConvert(loc, llvmPtrTy, args[0]);    return mlir::NVVM::InlinePtxOp::create(               builder, loc, {resultType}, {barrier, args[1], args[2]}, {}, -             ".reg .pred p; mbarrier.try_wait.shared.b64 p, [%1], %2, %3; " -             "selp.b32 %0, 1, 0, p;", +             "{\n" +             "  .reg .pred p;\n" +             "  mbarrier.try_wait.shared.b64 p, [%1], %2, %3;\n" +             "  selp.b32 %0, 1, 0, p;\n" +             "}",               {})        .getResult(0);  } @@ -9455,7 +9549,7 @@ void IntrinsicLibrary::genTMABulkS2G(llvm::ArrayRef<fir::ExtendedValue> args) {        builder, loc, dst, src, fir::getBase(args[2]), {}, {});    mlir::NVVM::InlinePtxOp::create(builder, loc, mlir::TypeRange{}, {}, {}, -                                  "cp.async.bulk.commit_group", {}); +                                  "cp.async.bulk.commit_group;", {});    mlir::NVVM::CpAsyncBulkWaitGroupOp::create(builder, loc,                                               builder.getI32IntegerAttr(0), {});  } @@ -9471,7 +9565,7 @@ static void genTMABulkStore(fir::FirOpBuilder &builder, mlir::Location loc,    mlir::NVVM::CpAsyncBulkSharedCTAToGlobalOp::create(builder, loc, dst, src,                                                       size, {}, {});    mlir::NVVM::InlinePtxOp::create(builder, loc, mlir::TypeRange{}, {}, {}, -                                  "cp.async.bulk.commit_group", {}); +                                  "cp.async.bulk.commit_group;", {});    mlir::NVVM::CpAsyncBulkWaitGroupOp::create(builder, loc,                                               builder.getI32IntegerAttr(0), {});  } diff --git a/flang/module/cudadevice.f90 b/flang/module/cudadevice.f90 index 59af58d..b1aef95 100644 --- a/flang/module/cudadevice.f90 +++ b/flang/module/cudadevice.f90 @@ -1171,6 +1171,27 @@ implicit none      integer(8), intent(inout) :: address      integer(8), value :: val      end function +    attributes(device) pure integer(4) function atomicaddr2(address, val) +      !dir$ ignore_tkr (rd) address, (d) val +      real(2), dimension(2), intent(inout) :: address +      real(2), dimension(2), intent(in) :: 
val +    end function +  end interface + +  interface atomicaddvector +    attributes(device) pure function atomicadd_r2x2(address, val) result(z) +      !dir$ ignore_tkr (rd) address, (d) val +      real(2), dimension(2), intent(inout) :: address +      real(2), dimension(2), intent(in) :: val +      real(2), dimension(2) :: z +    end function + +    attributes(device) pure function atomicadd_r4x2(address, val) result(z) +      !dir$ ignore_tkr (rd) address, (d) val +      real(4), dimension(2), intent(inout) :: address +      real(4), dimension(2), intent(in) :: val +      real(4), dimension(2) :: z +    end function    end interface    interface atomicsub diff --git a/flang/test/Lower/CUDA/cuda-atomicadd.cuf b/flang/test/Lower/CUDA/cuda-atomicadd.cuf new file mode 100644 index 0000000..baa6cdb --- /dev/null +++ b/flang/test/Lower/CUDA/cuda-atomicadd.cuf @@ -0,0 +1,19 @@ +! RUN: bbc -emit-hlfir -fcuda %s -o - | FileCheck %s + +! Test CUDA Fortran atmoicadd functions available cudadevice module + +attributes(global) subroutine atomicaddvector_r2() +  real(2), device :: a(2), tmp1(2), tmp2(2) +  tmp1 = atomicAddVector(a, tmp2) +end subroutine + +! CHECK-LABEL: func.func @_QPatomicaddvector_r2() attributes {cuf.proc_attr = #cuf.cuda_proc<global>} +! CHECK: llvm.atomicrmw fadd %{{.*}}, %{{.*}} seq_cst : !llvm.ptr, vector<2xf16> + +attributes(global) subroutine atomicaddvector_r4() +  real(4), device :: a(2), tmp1(2), tmp2(2) +  tmp1 = atomicAddVector(a, tmp2) +end subroutine + +! CHECK-LABEL: func.func @_QPatomicaddvector_r4() attributes {cuf.proc_attr = #cuf.cuda_proc<global>} +! CHECK: llvm.atomicrmw fadd %{{.*}}, %{{.*}} seq_cst : !llvm.ptr, vector<2xf32> diff --git a/flang/test/Lower/CUDA/cuda-device-proc.cuf b/flang/test/Lower/CUDA/cuda-device-proc.cuf index 09b4302..038aa0a 100644 --- a/flang/test/Lower/CUDA/cuda-device-proc.cuf +++ b/flang/test/Lower/CUDA/cuda-device-proc.cuf @@ -14,6 +14,8 @@ attributes(global) subroutine devsub()    integer :: smalltime    integer(4) :: res, offset    integer(8) :: resl +  real(2) :: r2a(2) +  real(2) :: tmp2(2)    integer :: tid    tid = threadIdx%x @@ -34,6 +36,7 @@ attributes(global) subroutine devsub()    al = atomicadd(al, 1_8)    af = atomicadd(af, 1.0_4)    ad = atomicadd(ad, 1.0_8) +  ai = atomicadd(r2a, tmp2)    ai = atomicsub(ai, 1_4)    al = atomicsub(al, 1_8) @@ -128,6 +131,7 @@ end  ! CHECK: %{{.*}} = llvm.atomicrmw add  %{{.*}}, %{{.*}} seq_cst : !llvm.ptr, i64  ! CHECK: %{{.*}} = llvm.atomicrmw fadd %{{.*}}, %{{.*}} seq_cst : !llvm.ptr, f32  ! CHECK: %{{.*}} = llvm.atomicrmw fadd %{{.*}}, %{{.*}} seq_cst : !llvm.ptr, f64 +! CHECK: %{{.*}} = llvm.atomicrmw fadd %{{.*}}, %{{.*}} seq_cst : !llvm.ptr, vector<2xf16>  ! CHECK: %{{.*}} = llvm.atomicrmw sub  %{{.*}}, %{{.*}} seq_cst : !llvm.ptr, i32  ! CHECK: %{{.*}} = llvm.atomicrmw sub  %{{.*}}, %{{.*}} seq_cst : !llvm.ptr, i64 @@ -440,7 +444,7 @@ end subroutine  ! CHECK: %[[LLVM_PTR:.*]] = fir.convert %[[DECL_SHARED]]#0 : (!fir.ref<i64>) -> !llvm.ptr  ! CHECK: %[[SHARED_PTR:.*]] = llvm.addrspacecast %[[LLVM_PTR]] : !llvm.ptr to !llvm.ptr<3> -! CHECK: nvvm.mbarrier.arrive.expect_tx %[[SHARED_PTR]], %{{.*}} : !llvm.ptr<3>, i32 +! CHECK: %{{.*}} = nvvm.inline_ptx "mbarrier.arrive.expect_tx.release.cta.shared::cta.b64 %{{.*}}, [%{{.*}}], %{{.*}};" ro(%{{.*}}, %{{.*}} : !llvm.ptr<3>, i32) -> i64  attributes(global) subroutine test_fence() @@ -490,7 +494,7 @@ end subroutine  ! CHECK-LABEL: func.func @_QPtest_bulk_s2g  ! 
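A minimal sketch of the idea behind the vector atomic-add lowering above, written against the generic llvm::IRBuilder API rather than the FIR/MLIR builders used in the patch (the function and variable names here are illustrative only): two scalar elements are packed into a `<2 x half>` vector and a single seq_cst `atomicrmw fadd` is issued.

```cpp
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Sketch: atomically add the two half-precision values at ValPtr to the
// <2 x half> object at Dst, returning the previous contents of Dst.
static Value *emitVectorAtomicFAdd(IRBuilder<> &B, Value *Dst, Value *ValPtr) {
  Type *HalfTy = Type::getHalfTy(B.getContext());
  auto *VecTy = FixedVectorType::get(HalfTy, 2);

  // Load the two scalar elements and pack them into a <2 x half> operand.
  Value *Lo = B.CreateLoad(HalfTy, B.CreateConstGEP1_32(HalfTy, ValPtr, 0));
  Value *Hi = B.CreateLoad(HalfTy, B.CreateConstGEP1_32(HalfTy, ValPtr, 1));
  Value *Vec = B.CreateInsertElement(PoisonValue::get(VecTy), Lo, uint64_t(0));
  Vec = B.CreateInsertElement(Vec, Hi, uint64_t(1));

  // One atomicrmw fadd on the packed vector, seq_cst like the lowering above.
  return B.CreateAtomicRMW(AtomicRMWInst::FAdd, Dst, Vec, MaybeAlign(4),
                           AtomicOrdering::SequentiallyConsistent);
}
```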
CHECL: nvvm.cp.async.bulk.global.shared.cta %{{.*}}, %{{.*}}, %{{.*}} : <1>, <3> -! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group" +! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group;"  ! CHECK: nvvm.cp.async.bulk.wait_group 0  attributes(device) subroutine testAtomicCasLoop(aa, n) @@ -515,7 +519,7 @@ end subroutine  ! CHECK-LABEL: func.func @_QPtest_barrier_try_wait()  ! CHECK: scf.while -! CHECK: %{{.*}} = nvvm.inline_ptx ".reg .pred p; mbarrier.try_wait.shared.b64 p, [%{{.*}}], %{{.*}}, %{{.*}}; selp.b32 %{{.*}}, 1, 0, p;" ro(%{{.*}}, %{{.*}}, %c1000000{{.*}} : !llvm.ptr, i64, i32) -> i32 +! CHECK: %{{.*}} = nvvm.inline_ptx "{\0A  .reg .pred p;\0A  mbarrier.try_wait.shared.b64 p, [%{{.*}}], %{{.*}}, %{{.*}};\0A  selp.b32 %{{.*}}, 1, 0, p;\0A}" ro(%{{.*}}, %{{.*}}, %{{.*}} : !llvm.ptr, i64, i32) -> i32  attributes(global) subroutine test_barrier_try_wait_sleep()    integer :: istat @@ -526,7 +530,7 @@ attributes(global) subroutine test_barrier_try_wait_sleep()  end subroutine  ! CHECK-LABEL: func.func @_QPtest_barrier_try_wait_sleep() -! CHECK: %{{.*}} = nvvm.inline_ptx ".reg .pred p; mbarrier.try_wait.shared.b64 p, [%{{.*}}], %{{.*}}, %{{.*}}; selp.b32 %0, 1, 0, p;" ro(%{{.*}}, %{{.*}}, %{{.*}} : !llvm.ptr, i64, i32) -> i32 +! CHECK: %{{.*}} = nvvm.inline_ptx "{\0A  .reg .pred p;\0A  mbarrier.try_wait.shared.b64 p, [%{{.*}}], %{{.*}}, %{{.*}};\0A  selp.b32 %{{.*}}, 1, 0, p;\0A}" ro(%{{.*}}, %{{.*}}, %{{.*}} : !llvm.ptr, i64, i32) -> i32  attributes(global) subroutine test_tma_bulk_load_c4(a, n)    integer(8), shared :: barrier1 @@ -671,7 +675,7 @@ end subroutine  ! CHECK-LABEL: func.func @_QPtest_tma_bulk_store_c4  ! CHECK: nvvm.cp.async.bulk.global.shared.cta %{{.*}}, %{{.*}}, %{{.*}} : <1>, <3> -! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group" +! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group;"  ! CHECK: nvvm.cp.async.bulk.wait_group 0  attributes(global) subroutine test_tma_bulk_store_c8(c, n) @@ -684,7 +688,7 @@ end subroutine  ! CHECK-LABEL: func.func @_QPtest_tma_bulk_store_c8  ! CHECK: nvvm.cp.async.bulk.global.shared.cta %{{.*}}, %{{.*}}, %{{.*}} : <1>, <3> -! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group" +! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group;"  ! CHECK: nvvm.cp.async.bulk.wait_group 0  attributes(global) subroutine test_tma_bulk_store_i4(c, n) @@ -697,7 +701,7 @@ end subroutine  ! CHECK-LABEL: func.func @_QPtest_tma_bulk_store_i4  ! CHECK: nvvm.cp.async.bulk.global.shared.cta %{{.*}}, %{{.*}}, %{{.*}} : <1>, <3> -! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group" +! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group;"  ! CHECK: nvvm.cp.async.bulk.wait_group 0  attributes(global) subroutine test_tma_bulk_store_i8(c, n) @@ -710,7 +714,7 @@ end subroutine  ! CHECK-LABEL: func.func @_QPtest_tma_bulk_store_i8  ! CHECK: nvvm.cp.async.bulk.global.shared.cta %{{.*}}, %{{.*}}, %{{.*}} : <1>, <3> -! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group" +! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group;"  ! CHECK: nvvm.cp.async.bulk.wait_group 0 @@ -724,7 +728,7 @@ end subroutine  ! CHECK-LABEL: func.func @_QPtest_tma_bulk_store_r2  ! CHECK: nvvm.cp.async.bulk.global.shared.cta %{{.*}}, %{{.*}}, %{{.*}} : <1>, <3> -! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group" +! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group;"  ! CHECK: nvvm.cp.async.bulk.wait_group 0  attributes(global) subroutine test_tma_bulk_store_r4(c, n) @@ -737,7 +741,7 @@ end subroutine  ! CHECK-LABEL: func.func @_QPtest_tma_bulk_store_r4  ! 
CHECK: nvvm.cp.async.bulk.global.shared.cta %{{.*}}, %{{.*}}, %{{.*}} : <1>, <3> -! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group" +! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group;"  ! CHECK: nvvm.cp.async.bulk.wait_group 0  attributes(global) subroutine test_tma_bulk_store_r8(c, n) @@ -750,5 +754,5 @@ end subroutine  ! CHECK-LABEL: func.func @_QPtest_tma_bulk_store_r8  ! CHECK: nvvm.cp.async.bulk.global.shared.cta %{{.*}}, %{{.*}}, %{{.*}} : <1>, <3> -! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group" +! CHECK: nvvm.inline_ptx "cp.async.bulk.commit_group;"  ! CHECK: nvvm.cp.async.bulk.wait_group 0 diff --git a/libcxx/docs/TestingLibcxx.rst b/libcxx/docs/TestingLibcxx.rst index dbe6948..e15c5b1 100644 --- a/libcxx/docs/TestingLibcxx.rst +++ b/libcxx/docs/TestingLibcxx.rst @@ -451,7 +451,7 @@ Instead use:  .. code-block:: cpp -   // UNSUPPORTED: std-at-least-c++26 +   // REQUIRES: std-at-least-c++26  There is no corresponding ``std-at-most-c++23``. This could be useful when  tests are only valid for a small set of standard versions. For example, a diff --git a/libcxx/include/__exception/exception_ptr.h b/libcxx/include/__exception/exception_ptr.h index 796fa92..e78126e 100644 --- a/libcxx/include/__exception/exception_ptr.h +++ b/libcxx/include/__exception/exception_ptr.h @@ -16,6 +16,8 @@  #include <__memory/construct_at.h>  #include <__type_traits/decay.h>  #include <__type_traits/is_pointer.h> +#include <__utility/move.h> +#include <__utility/swap.h>  #include <cstdlib>  #include <typeinfo> @@ -23,6 +25,9 @@  #  pragma GCC system_header  #endif +_LIBCPP_PUSH_MACROS +#include <__undef_macros> +  #ifndef _LIBCPP_ABI_MICROSOFT  #  if _LIBCPP_AVAILABILITY_HAS_INIT_PRIMARY_EXCEPTION @@ -57,6 +62,8 @@ _LIBCPP_BEGIN_UNVERSIONED_NAMESPACE_STD  #ifndef _LIBCPP_ABI_MICROSOFT +inline _LIBCPP_HIDE_FROM_ABI void swap(exception_ptr& __x, exception_ptr& __y) _NOEXCEPT; +  class _LIBCPP_EXPORTED_FROM_ABI exception_ptr {    void* __ptr_; @@ -75,7 +82,15 @@ public:    _LIBCPP_HIDE_FROM_ABI exception_ptr(nullptr_t) _NOEXCEPT : __ptr_() {}    exception_ptr(const exception_ptr&) _NOEXCEPT; +  _LIBCPP_HIDE_FROM_ABI exception_ptr(exception_ptr&& __other) _NOEXCEPT : __ptr_(__other.__ptr_) { +    __other.__ptr_ = nullptr; +  }    exception_ptr& operator=(const exception_ptr&) _NOEXCEPT; +  _LIBCPP_HIDE_FROM_ABI exception_ptr& operator=(exception_ptr&& __other) _NOEXCEPT { +    exception_ptr __tmp(std::move(__other)); +    std::swap(__tmp, *this); +    return *this; +  }    ~exception_ptr() _NOEXCEPT;    _LIBCPP_HIDE_FROM_ABI explicit operator bool() const _NOEXCEPT { return __ptr_ != nullptr; } @@ -88,10 +103,16 @@ public:      return !(__x == __y);    } +  friend _LIBCPP_HIDE_FROM_ABI void swap(exception_ptr& __x, exception_ptr& __y) _NOEXCEPT; +    friend _LIBCPP_EXPORTED_FROM_ABI exception_ptr current_exception() _NOEXCEPT;    friend _LIBCPP_EXPORTED_FROM_ABI void rethrow_exception(exception_ptr);  }; +inline _LIBCPP_HIDE_FROM_ABI void swap(exception_ptr& __x, exception_ptr& __y) _NOEXCEPT { +  std::swap(__x.__ptr_, __y.__ptr_); +} +  #  if _LIBCPP_HAS_EXCEPTIONS  #    if _LIBCPP_AVAILABILITY_HAS_INIT_PRIMARY_EXCEPTION  template <class _Ep> @@ -201,4 +222,6 @@ _LIBCPP_HIDE_FROM_ABI exception_ptr make_exception_ptr(_Ep __e) _NOEXCEPT {  #endif // _LIBCPP_ABI_MICROSOFT  _LIBCPP_END_UNVERSIONED_NAMESPACE_STD +_LIBCPP_POP_MACROS +  #endif // _LIBCPP___EXCEPTION_EXCEPTION_PTR_H diff --git a/libcxx/include/string b/libcxx/include/string index 33382c7..ede4246 100644 --- a/libcxx/include/string +++ 
b/libcxx/include/string @@ -644,6 +644,7 @@ basic_string<char32_t> operator""s( const char32_t *str, size_t len );  #  include <__utility/forward.h>  #  include <__utility/is_pointer_in_range.h>  #  include <__utility/move.h> +#  include <__utility/no_destroy.h>  #  include <__utility/scope_guard.h>  #  include <__utility/swap.h>  #  include <climits> @@ -918,6 +919,7 @@ private:      __rep() = default;      _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __rep(__short __r) : __s(__r) {}      _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __rep(__long __r) : __l(__r) {} +    _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __rep(__uninitialized_tag) {}    };    _LIBCPP_COMPRESSED_PAIR(__rep, __rep_, allocator_type, __alloc_); @@ -1210,7 +1212,10 @@ public:    }  #  endif // _LIBCPP_CXX03_LANG -  inline _LIBCPP_CONSTEXPR_SINCE_CXX20 ~basic_string() { __reset_internal_buffer(); } +  // TODO(boomanaiden154): Once we mark this in destructors as dead on return, +  // we can use a normal call to __reset_internal_buffer and remove the extra +  // __rep constructor. +  inline _LIBCPP_CONSTEXPR_SINCE_CXX20 ~basic_string() { __reset_internal_buffer(__rep(__uninitialized_tag())); }    _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 operator __self_view() const _NOEXCEPT {      return __self_view(typename __self_view::__assume_valid(), data(), size()); diff --git a/libcxx/modules/std/exception.inc b/libcxx/modules/std/exception.inc index 02b0f80..3dbc011 100644 --- a/libcxx/modules/std/exception.inc +++ b/libcxx/modules/std/exception.inc @@ -18,6 +18,7 @@ export namespace std {    using std::rethrow_exception;    using std::rethrow_if_nested;    using std::set_terminate; +  using std::swap;    using std::terminate;    using std::terminate_handler;    using std::throw_with_nested; diff --git a/libcxx/test/std/language.support/support.exception/propagation/exception_ptr.pass.cpp b/libcxx/test/std/language.support/support.exception/propagation/exception_ptr.pass.cpp index 0aded33..7e25d40 100644 --- a/libcxx/test/std/language.support/support.exception/propagation/exception_ptr.pass.cpp +++ b/libcxx/test/std/language.support/support.exception/propagation/exception_ptr.pass.cpp @@ -14,7 +14,6 @@  #include <exception>  #include <cassert> -#include <type_traits>  #include "test_macros.h" diff --git a/libcxx/test/std/language.support/support.exception/propagation/exception_ptr_move_assignment.pass.cpp b/libcxx/test/std/language.support/support.exception/propagation/exception_ptr_move_assignment.pass.cpp new file mode 100644 index 0000000..6882bc6 --- /dev/null +++ b/libcxx/test/std/language.support/support.exception/propagation/exception_ptr_move_assignment.pass.cpp @@ -0,0 +1,45 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: no-exceptions, c++03 + +// <exception> + +// typedef unspecified exception_ptr; + +// Test the move assignment of exception_ptr + +#include <exception> +#include <utility> +#include <cassert> + +#include "test_macros.h" + +int main(int, char**) { +  std::exception_ptr p = std::make_exception_ptr(42); +  std::exception_ptr p2{p}; +  assert(p2 == p); +  // Under test: the move assignment +  std::exception_ptr p3; +  p3 = std::move(p2); +  assert(p3 == p); +// `p2` was moved from. In libc++ it will be nullptr, but +// this is not guaranteed by the standard. +#if defined(_LIBCPP_VERSION) && !defined(_LIBCPP_ABI_MICROSOFT) +  assert(p2 == nullptr); +  assert(p2 == nullptr); +#endif + +  try { +    std::rethrow_exception(p3); +  } catch (int e) { +    assert(e == 42); +  } + +  return 0; +} diff --git a/libcxx/test/std/language.support/support.exception/propagation/exception_ptr_move_ctr.pass.cpp b/libcxx/test/std/language.support/support.exception/propagation/exception_ptr_move_ctr.pass.cpp new file mode 100644 index 0000000..122e229 --- /dev/null +++ b/libcxx/test/std/language.support/support.exception/propagation/exception_ptr_move_ctr.pass.cpp @@ -0,0 +1,43 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: no-exceptions, c++03 + +// <exception> + +// typedef unspecified exception_ptr; + +// Test the move constructor of exception_ptr + +#include <exception> +#include <utility> +#include <cassert> + +#include "test_macros.h" + +int main(int, char**) { +  std::exception_ptr p = std::make_exception_ptr(42); +  std::exception_ptr p2{p}; +  assert(p2 == p); +  // Under test: The move constructor +  std::exception_ptr p3{std::move(p2)}; +  assert(p3 == p); +// `p2` was moved from. In libc++ it will be nullptr, but +// this is not guaranteed by the standard. +#if defined(_LIBCPP_VERSION) && !defined(_LIBCPP_ABI_MICROSOFT) +  assert(p2 == nullptr); +#endif + +  try { +    std::rethrow_exception(p3); +  } catch (int e) { +    assert(e == 42); +  } + +  return 0; +} diff --git a/libcxx/test/std/language.support/support.exception/propagation/exception_ptr_swap.pass.cpp b/libcxx/test/std/language.support/support.exception/propagation/exception_ptr_swap.pass.cpp new file mode 100644 index 0000000..82b4713 --- /dev/null +++ b/libcxx/test/std/language.support/support.exception/propagation/exception_ptr_swap.pass.cpp @@ -0,0 +1,40 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
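The new tests above exercise the move constructor and move assignment individually; for orientation, a compact usage sketch of what the added move operations and the non-member swap make possible (plain standard C++; in libc++ a move steals the pointer instead of touching the reference count, which is the point of the change):

```cpp
#include <cassert>
#include <exception>
#include <utility>

int main() {
  std::exception_ptr a = std::make_exception_ptr(42);

  // Move construction: no atomic refcount traffic, 'a' is left empty in libc++.
  std::exception_ptr b = std::move(a);

  // Move assignment, then the new non-member swap.
  std::exception_ptr c;
  c = std::move(b);                                // c now refers to the int 42
  std::exception_ptr d = std::make_exception_ptr(7);
  std::swap(c, d);                                 // c: 7, d: 42

  try {
    std::rethrow_exception(d);
  } catch (int e) {
    assert(e == 42);
  }
  return 0;
}
```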
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// UNSUPPORTED: no-exceptions + +// <exception> + +// typedef unspecified exception_ptr; + +// Test swapping of exception_ptr + +#include <exception> +#include <utility> +#include <cassert> + +#include "test_macros.h" + +int main(int, char**) { +  std::exception_ptr p21 = std::make_exception_ptr(42); +  std::exception_ptr p42 = std::make_exception_ptr(21); +  std::swap(p42, p21); + +  try { +    std::rethrow_exception(p21); +  } catch (int e) { +    assert(e == 21); +  } +  try { +    std::rethrow_exception(p42); +  } catch (int e) { +    assert(e == 42); +  } + +  return 0; +} diff --git a/lld/test/wasm/runtime-relocations-himem.s b/lld/test/wasm/runtime-relocations-himem.s new file mode 100644 index 0000000..a12a93a --- /dev/null +++ b/lld/test/wasm/runtime-relocations-himem.s @@ -0,0 +1,60 @@ +## Verifies runtime relocation code for addresses over 2gb works correctly. +## We have had issues with LEB encoding of address over 2gb in i32.const +## instruction leading to invalid binaries. + +# RUN: llvm-mc -filetype=obj -triple=wasm32-unknown-unknown -o %t.o %s +# RUN: wasm-ld --global-base=2147483648 --experimental-pic --unresolved-symbols=import-dynamic -no-gc-sections --shared-memory --no-entry -o %t.wasm %t.o +# XUN: obj2yaml %t.wasm | FileCheck %s +# RUN: llvm-objdump -d --no-show-raw-insn --no-leading-addr %t.wasm | FileCheck %s -- + +.globl tls_sym +.globl data_sym +.globl _start +.globaltype __tls_base, i32 + +_start: +  .functype _start () -> () +  global.get __tls_base +  i32.const tls_sym@TLSREL +  i32.add +  drop +  i32.const data_sym +  drop +  end_function + +.section tls_sec,"T",@ +.p2align  2 +tls_sym: +  .int32 0 +  .int32 extern_sym +  .size tls_sym, 8 + +.section data_sec,"",@ +.p2align  2 +data_sym: +  .int32 0 +  .int32 extern_sym +  .size data_sym, 8 + +.section  .custom_section.target_features,"",@ +  .int8 2 +  .int8 43 +  .int8 7 +  .ascii  "atomics" +  .int8 43 +  .int8 11 +  .ascii  "bulk-memory" + +# CHECK: <__wasm_apply_data_relocs>: +# CHECK-EMPTY: +# CHECK-NEXT:  i32.const -2147483636 +# CHECK-NEXT:  global.get 0 +# CHECK-NEXT:  i32.store 0 +# CHECK-NEXT:  end + +# CHECK: <__wasm_apply_tls_relocs>: +# CHECK-EMPTY: +# CHECK-NEXT:  i32.const -2147483644 +# CHECK-NEXT:  global.get 0 +# CHECK-NEXT:  i32.store 0 +# CHECK-NEXT:  end diff --git a/lld/wasm/InputChunks.cpp b/lld/wasm/InputChunks.cpp index 44927e7..14e02e6 100644 --- a/lld/wasm/InputChunks.cpp +++ b/lld/wasm/InputChunks.cpp @@ -423,8 +423,6 @@ bool InputChunk::generateRelocationCode(raw_ostream &os) const {    bool is64 = ctx.arg.is64.value_or(false);    bool generated = false; -  unsigned opcode_ptr_const = is64 ? WASM_OPCODE_I64_CONST -                                   : WASM_OPCODE_I32_CONST;    unsigned opcode_ptr_add = is64 ? 
WASM_OPCODE_I64_ADD                                   : WASM_OPCODE_I32_ADD; @@ -451,8 +449,7 @@ bool InputChunk::generateRelocationCode(raw_ostream &os) const {                        << " output offset=" << offset << "\n");      // Calculate the address at which to apply the relocation -    writeU8(os, opcode_ptr_const, "CONST"); -    writeSleb128(os, offset, "offset"); +    writePtrConst(os, offset, is64, "offset");      // In PIC mode we need to add the __memory_base      if (ctx.isPic) { @@ -466,8 +463,6 @@ bool InputChunk::generateRelocationCode(raw_ostream &os) const {      // Now figure out what we want to store at this location      bool is64 = relocIs64(rel.Type); -    unsigned opcode_reloc_const = -        is64 ? WASM_OPCODE_I64_CONST : WASM_OPCODE_I32_CONST;      unsigned opcode_reloc_add =          is64 ? WASM_OPCODE_I64_ADD : WASM_OPCODE_I32_ADD;      unsigned opcode_reloc_store = @@ -477,8 +472,7 @@ bool InputChunk::generateRelocationCode(raw_ostream &os) const {        writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");        writeUleb128(os, sym->getGOTIndex(), "global index");        if (rel.Addend) { -        writeU8(os, opcode_reloc_const, "CONST"); -        writeSleb128(os, rel.Addend, "addend"); +        writePtrConst(os, rel.Addend, is64, "addend");          writeU8(os, opcode_reloc_add, "ADD");        }      } else { @@ -491,8 +485,8 @@ bool InputChunk::generateRelocationCode(raw_ostream &os) const {          baseSymbol = ctx.sym.tlsBase;        writeU8(os, WASM_OPCODE_GLOBAL_GET, "GLOBAL_GET");        writeUleb128(os, baseSymbol->getGlobalIndex(), "base"); -      writeU8(os, opcode_reloc_const, "CONST"); -      writeSleb128(os, file->calcNewValue(rel, tombstone, this), "offset"); +      writePtrConst(os, file->calcNewValue(rel, tombstone, this), is64, +                    "offset");        writeU8(os, opcode_reloc_add, "ADD");      } diff --git a/lld/wasm/SyntheticSections.cpp b/lld/wasm/SyntheticSections.cpp index e119270..399a508 100644 --- a/lld/wasm/SyntheticSections.cpp +++ b/lld/wasm/SyntheticSections.cpp @@ -434,8 +434,6 @@ void GlobalSection::addInternalGOTEntry(Symbol *sym) {  void GlobalSection::generateRelocationCode(raw_ostream &os, bool TLS) const {    assert(!ctx.arg.extendedConst);    bool is64 = ctx.arg.is64.value_or(false); -  unsigned opcode_ptr_const = is64 ? WASM_OPCODE_I64_CONST -                                   : WASM_OPCODE_I32_CONST;    unsigned opcode_ptr_add = is64 ? 
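For context on the writePtrConst changes in these hunks: as the new test's comment describes, wasm's `i32.const` immediate is a *signed* LEB128, so a virtual address at or above 2 GiB must be emitted as its two's-complement (negative) 32-bit value; encoding the raw 64-bit offset as a positive value is out of range for `i32.const` and yields an invalid binary, which is why the himem test expects `i32.const -2147483636`. A self-contained sketch of the signed encoding (the helper below is illustrative, not lld's actual implementation):

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Encode a value as signed LEB128, the format used by i32.const/i64.const.
static void encodeSleb128(std::vector<uint8_t> &out, int64_t value) {
  bool more = true;
  while (more) {
    uint8_t byte = value & 0x7f;
    value >>= 7; // arithmetic shift keeps the sign bit
    more = !((value == 0 && !(byte & 0x40)) || (value == -1 && (byte & 0x40)));
    if (more)
      byte |= 0x80;
    out.push_back(byte);
  }
}

int main() {
  // A data address at --global-base=2147483648 plus a small offset, as in the
  // test above. For wasm32 it must first be reinterpreted as a signed i32.
  int32_t addr = static_cast<int32_t>(0x8000000cu); // -2147483636
  std::vector<uint8_t> bytes;
  encodeSleb128(bytes, addr);
  std::printf("i32.const %d -> %zu LEB bytes\n", addr, bytes.size());
  return 0;
}
```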
WASM_OPCODE_I64_ADD                                   : WASM_OPCODE_I32_ADD; @@ -452,8 +450,7 @@ void GlobalSection::generateRelocationCode(raw_ostream &os, bool TLS) const {          writeUleb128(os, ctx.sym.memoryBase->getGlobalIndex(), "__memory_base");        // Add the virtual address of the data symbol -      writeU8(os, opcode_ptr_const, "CONST"); -      writeSleb128(os, d->getVA(), "offset"); +      writePtrConst(os, d->getVA(), is64, "offset");      } else if (auto *f = dyn_cast<FunctionSymbol>(sym)) {        if (f->isStub)          continue; @@ -462,8 +459,7 @@ void GlobalSection::generateRelocationCode(raw_ostream &os, bool TLS) const {        writeUleb128(os, ctx.sym.tableBase->getGlobalIndex(), "__table_base");        // Add the table index to __table_base -      writeU8(os, opcode_ptr_const, "CONST"); -      writeSleb128(os, f->getTableIndex(), "offset"); +      writePtrConst(os, f->getTableIndex(), is64, "offset");      } else {        assert(isa<UndefinedData>(sym) || isa<SharedData>(sym));        continue; diff --git a/lldb/CMakeLists.txt b/lldb/CMakeLists.txt index e3b72e9..01b5546 100644 --- a/lldb/CMakeLists.txt +++ b/lldb/CMakeLists.txt @@ -87,6 +87,12 @@ if (LLDB_ENABLE_PYTHON)        set(LLDB_PYTHON_EXT_SUFFIX "_d${LLDB_PYTHON_EXT_SUFFIX}")      endif()    endif() +  if(TARGET Python3::Python) +    get_target_property(_Python3_LIB_PATH Python3::Python IMPORTED_LIBRARY_LOCATION) +    if(_Python3_LIB_PATH) +      get_filename_component(LLDB_PYTHON_RUNTIME_LIBRARY_FILENAME "${_Python3_LIB_PATH}" NAME) +    endif() +  endif()  endif ()  if (LLDB_ENABLE_LUA) diff --git a/lldb/include/lldb/Target/InstrumentationRuntime.h b/lldb/include/lldb/Target/InstrumentationRuntime.h index a6121c2..d249952 100644 --- a/lldb/include/lldb/Target/InstrumentationRuntime.h +++ b/lldb/include/lldb/Target/InstrumentationRuntime.h @@ -73,6 +73,13 @@ protected:    /// is guaranteed to be loaded.    virtual void Activate() = 0; +  /// \return true if `CheckIfRuntimeIsValid` should be called on all modules. +  /// In this case the return value of `GetPatternForRuntimeLibrary` will be +  /// ignored. Return false if `CheckIfRuntimeIsValid` should only be called +  /// for modules whose name matches `GetPatternForRuntimeLibrary`. +  /// +  virtual bool MatchAllModules() { return false; } +  public:    static void ModulesDidLoad(lldb_private::ModuleList &module_list,                               Process *process, diff --git a/lldb/include/lldb/Utility/Stream.h b/lldb/include/lldb/Utility/Stream.h index 82774d5..1345555 100644 --- a/lldb/include/lldb/Utility/Stream.h +++ b/lldb/include/lldb/Utility/Stream.h @@ -300,6 +300,12 @@ public:    ///     The current indentation level.    unsigned GetIndentLevel() const; +  /// Set the current indentation level. +  /// +  /// \param[in] level +  ///     The new indentation level. +  void SetIndentLevel(unsigned level); +    /// Indent the current line in the stream.    ///    /// Indent the current line using the current indentation level and print an @@ -315,6 +321,20 @@ public:    /// Increment the current indentation level.    
void IndentMore(unsigned amount = 2); +  struct IndentScope { +    IndentScope(Stream &stream) +        : m_stream(stream), m_original_indent_level(stream.GetIndentLevel()) {} +    ~IndentScope() { m_stream.SetIndentLevel(m_original_indent_level); } + +  private: +    Stream &m_stream; +    unsigned m_original_indent_level; +  }; + +  /// Create an indentation scope that restores the original indent level when +  /// the object goes out of scope (RAII). +  IndentScope MakeIndentScope(unsigned indent_amount = 2); +    /// Output an offset value.    ///    /// Put an offset \a uval out to the stream using the printf format in \a @@ -364,12 +384,6 @@ public:    ///     address and pointer values.    void SetAddressByteSize(uint32_t addr_size); -  /// Set the current indentation level. -  /// -  /// \param[in] level -  ///     The new indentation level. -  void SetIndentLevel(unsigned level); -    /// Output a SLEB128 number to the stream.    ///    /// Put an SLEB128 \a uval out to the stream using the printf format in \a diff --git a/lldb/source/Plugins/ABI/RISCV/ABISysV_riscv.cpp b/lldb/source/Plugins/ABI/RISCV/ABISysV_riscv.cpp index ff37b48..a5547a4 100644 --- a/lldb/source/Plugins/ABI/RISCV/ABISysV_riscv.cpp +++ b/lldb/source/Plugins/ABI/RISCV/ABISysV_riscv.cpp @@ -798,6 +798,8 @@ bool ABISysV_riscv::RegisterIsCalleeSaved(const RegisterInfo *reg_info) {            .Cases({"f8", "f9", "f18", "f19", "f20", "f21", "f22", "f23"},                   is_hw_fp)            .Cases({"f24", "f25", "f26", "f27"}, is_hw_fp) +          // vlenb is constant and needed for vector unwinding. +          .Case("vlenb", true)            .Default(false);    return is_callee_saved; diff --git a/lldb/source/Target/InstrumentationRuntime.cpp b/lldb/source/Target/InstrumentationRuntime.cpp index 7e58e8b..d9800a8 100644 --- a/lldb/source/Target/InstrumentationRuntime.cpp +++ b/lldb/source/Target/InstrumentationRuntime.cpp @@ -55,7 +55,8 @@ void InstrumentationRuntime::ModulesDidLoad(        return IterationAction::Continue;      const RegularExpression &runtime_regex = GetPatternForRuntimeLibrary(); -    if (runtime_regex.Execute(file_spec.GetFilename().GetCString()) || +    if (MatchAllModules() || +        runtime_regex.Execute(file_spec.GetFilename().GetCString()) ||          module_sp->IsExecutable()) {        if (CheckIfRuntimeIsValid(module_sp)) {          SetRuntimeModuleSP(module_sp); diff --git a/lldb/source/Target/Target.cpp b/lldb/source/Target/Target.cpp index 1e43094..a23091a 100644 --- a/lldb/source/Target/Target.cpp +++ b/lldb/source/Target/Target.cpp @@ -3962,9 +3962,7 @@ void Target::StopHook::GetDescription(Stream &s,      return;    } -  unsigned indent_level = s.GetIndentLevel(); - -  s.SetIndentLevel(indent_level + 2); +  auto indent_scope = s.MakeIndentScope();    s.Printf("Hook: %" PRIu64 "\n", GetID());    if (m_active) @@ -3978,19 +3976,17 @@ void Target::StopHook::GetDescription(Stream &s,    if (m_specifier_sp) {      s.Indent();      s.PutCString("Specifier:\n"); -    s.SetIndentLevel(indent_level + 4); +    auto indent_scope = s.MakeIndentScope();      m_specifier_sp->GetDescription(&s, level); -    s.SetIndentLevel(indent_level + 2);    }    if (m_thread_spec_up) {      StreamString tmp;      s.Indent("Thread:\n");      m_thread_spec_up->GetDescription(&tmp, level); -    s.SetIndentLevel(indent_level + 4); +    auto indent_scope = s.MakeIndentScope();      s.Indent(tmp.GetString());      s.PutCString("\n"); -    s.SetIndentLevel(indent_level + 2);    }    GetSubclassDescription(s, 
level);  } @@ -4003,14 +3999,13 @@ void Target::StopHookCommandLine::GetSubclassDescription(        s.PutCString(m_commands.GetStringAtIndex(0));      return;    } -  s.Indent("Commands: \n"); -  s.SetIndentLevel(s.GetIndentLevel() + 4); +  s.Indent("Commands:\n"); +  auto indent_scope = s.MakeIndentScope(4);    uint32_t num_commands = m_commands.GetSize();    for (uint32_t i = 0; i < num_commands; i++) {      s.Indent(m_commands.GetStringAtIndex(i));      s.PutCString("\n");    } -  s.SetIndentLevel(s.GetIndentLevel() - 4);  }  // Target::StopHookCommandLine @@ -4145,7 +4140,7 @@ void Target::StopHookScripted::GetSubclassDescription(      return;    s.Indent("Args:\n"); -  s.SetIndentLevel(s.GetIndentLevel() + 4); +  auto indent_scope = s.MakeIndentScope(4);    auto print_one_element = [&s](llvm::StringRef key,                                  StructuredData::Object *object) { @@ -4155,8 +4150,6 @@ void Target::StopHookScripted::GetSubclassDescription(    };    as_dict->ForEach(print_one_element); - -  s.SetIndentLevel(s.GetIndentLevel() - 4);  }  static constexpr OptionEnumValueElement g_dynamic_value_types[] = { diff --git a/lldb/source/Utility/Stream.cpp b/lldb/source/Utility/Stream.cpp index 89dce9f..e9632c3 100644 --- a/lldb/source/Utility/Stream.cpp +++ b/lldb/source/Utility/Stream.cpp @@ -202,6 +202,14 @@ void Stream::IndentLess(unsigned amount) {      m_indent_level = 0;  } +// Create an indentation scope that restores the original indent level when the +// object goes out of scope (RAII). +Stream::IndentScope Stream::MakeIndentScope(unsigned indent_amount) { +  IndentScope indent_scope(*this); +  IndentMore(indent_amount); +  return indent_scope; +} +  // Get the address size in bytes  uint32_t Stream::GetAddressByteSize() const { return m_addr_size; } diff --git a/lldb/test/API/commands/expression/weak_symbols/TestWeakSymbols.py b/lldb/test/API/commands/expression/weak_symbols/TestWeakSymbols.py index 50efecb..bed129a 100644 --- a/lldb/test/API/commands/expression/weak_symbols/TestWeakSymbols.py +++ b/lldb/test/API/commands/expression/weak_symbols/TestWeakSymbols.py @@ -15,7 +15,7 @@ class TestWeakSymbolsInExpressions(TestBase):      NO_DEBUG_INFO_TESTCASE = True      @skipUnlessDarwin -    @skipIf(compiler="clang", compiler_version=["<", "7.0"]) +    @skipIf(compiler="clang", compiler_version=["<", "19.0"])      def test_weak_symbol_in_expr(self):          """Tests that we can refer to weak symbols in expressions."""          self.build() diff --git a/lldb/test/API/lang/cpp/libcxx-internals-recognizer/TestLibcxxInternalsRecognizer.py b/lldb/test/API/lang/cpp/libcxx-internals-recognizer/TestLibcxxInternalsRecognizer.py index 2f942da..d8a729b 100644 --- a/lldb/test/API/lang/cpp/libcxx-internals-recognizer/TestLibcxxInternalsRecognizer.py +++ b/lldb/test/API/lang/cpp/libcxx-internals-recognizer/TestLibcxxInternalsRecognizer.py @@ -9,7 +9,7 @@ class LibCxxInternalsRecognizerTestCase(TestBase):      NO_DEBUG_INFO_TESTCASE = True      @add_test_categories(["libc++"]) -    @skipIf(compiler="clang", compiler_version=["<=", "19.0"]) +    @skipIf(compiler="clang", compiler_version=["<", "19.0"])      def test_frame_recognizer(self):          """Test that implementation details of libc++ are hidden"""          self.build() diff --git a/lldb/test/API/lang/objc/modules-auto-import/TestModulesAutoImport.py b/lldb/test/API/lang/objc/modules-auto-import/TestModulesAutoImport.py index 142d27d..f3558f6 100644 --- a/lldb/test/API/lang/objc/modules-auto-import/TestModulesAutoImport.py +++ 
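A short usage sketch of the Stream::IndentScope helper added above, mirroring the pattern now used in Target.cpp (the function below is hypothetical; only the header path and the Indent/MakeIndentScope calls come from the lldb API shown in the hunks):

```cpp
#include "lldb/Utility/Stream.h"

// Sketch: the RAII helper keeps indentation balanced even on early returns.
void DescribeThing(lldb_private::Stream &s, bool verbose) {
  s.Indent("Thing:\n");
  auto indent_scope = s.MakeIndentScope(); // indent by 2 until end of scope
  s.Indent("State: enabled\n");
  if (!verbose)
    return; // no manual SetIndentLevel() bookkeeping needed on this path
  {
    auto nested = s.MakeIndentScope(4); // deeper indent for the detail block
    s.Indent("Details:\n");
  } // the indent level from before the block is restored here
  s.Indent("back at the outer level\n");
}
```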
b/lldb/test/API/lang/objc/modules-auto-import/TestModulesAutoImport.py @@ -16,6 +16,7 @@ class ObjCModulesAutoImportTestCase(TestBase):          self.line = line_number("main.m", "// Set breakpoint 0 here.")      @skipIf(macos_version=["<", "10.12"]) +    @skipIf(compiler="clang", compiler_version=["<", "19.0"])      def test_expr(self):          self.build()          exe = self.getBuildArtifact("a.out") diff --git a/lldb/test/API/lang/objc/modules-objc-property/TestModulesObjCProperty.py b/lldb/test/API/lang/objc/modules-objc-property/TestModulesObjCProperty.py index 3be064a..657a710 100644 --- a/lldb/test/API/lang/objc/modules-objc-property/TestModulesObjCProperty.py +++ b/lldb/test/API/lang/objc/modules-objc-property/TestModulesObjCProperty.py @@ -6,6 +6,7 @@ from lldbsuite.test import lldbutil  class TestCase(TestBase):      @no_debug_info_test +    @skipIf(compiler="clang", compiler_version=["<", "19.0"])      def test_conflicting_properties(self):          """Tests receiving two properties with the same name from modules."""          self.build() diff --git a/lldb/test/Shell/DAP/TestClientLauncher.test b/lldb/test/Shell/DAP/TestClientLauncher.test new file mode 100644 index 0000000..a79a940 --- /dev/null +++ b/lldb/test/Shell/DAP/TestClientLauncher.test @@ -0,0 +1,2 @@ +# RUN: lldb-dap --client vscode-url -- /path/to/foo | FileCheck %s +# CHECK: vscode://llvm-vs-code-extensions.lldb-dap/start?program=%2Fpath%2Fto%2Ffoo diff --git a/lldb/test/Shell/ExecControl/StopHook/stop-hook-list-format.test b/lldb/test/Shell/ExecControl/StopHook/stop-hook-list-format.test new file mode 100644 index 0000000..a955780 --- /dev/null +++ b/lldb/test/Shell/ExecControl/StopHook/stop-hook-list-format.test @@ -0,0 +1,36 @@ +# Test format (e.g., indentation) when printing the list of stop hooks. +# +# RUN: %lldb -b -s %s | FileCheck %s --match-full-lines --strict-whitespace + +# Create some stop hooks +target stop-hook add -o 'print "Hello"' --auto-continue true --at-initial-stop true +target stop-hook add -o 'print "world,"' -o 'print "nice"' --file 'my_file' +target stop-hook add -o 'print "weather!"'  --classname 'MyClass' --thread-name 'my_thread' + +# Print hooks +target stop-hook list + +# CHECK:(lldb) target stop-hook list +# CHECK:Hook: 1 +# CHECK:  State: enabled +# CHECK:  AutoContinue on +# CHECK:  Commands: +# CHECK:      print "Hello" +# CHECK-EMPTY: +# CHECK:Hook: 2 +# CHECK:  State: enabled +# CHECK:  Specifier: +# CHECK:    File: my_file. +# CHECK:  Commands: +# CHECK:      print "world," +# CHECK:      print "nice" +# CHECK-EMPTY: +# CHECK:Hook: 3 +# CHECK:  State: enabled +# CHECK:  Specifier: +# CHECK:    Class name: MyClass. +# CHECK:  Thread: +# CHECK:    thread name: "my_thread"  +# CHECK:  Commands: +# CHECK:      print "weather!" +# CHECK-EMPTY: diff --git a/lldb/tools/driver/CMakeLists.txt b/lldb/tools/driver/CMakeLists.txt index 67956af..efe5150 100644 --- a/lldb/tools/driver/CMakeLists.txt +++ b/lldb/tools/driver/CMakeLists.txt @@ -37,6 +37,9 @@ add_dependencies(lldb  if(DEFINED LLDB_PYTHON_DLL_RELATIVE_PATH)    target_compile_definitions(lldb PRIVATE LLDB_PYTHON_DLL_RELATIVE_PATH="${LLDB_PYTHON_DLL_RELATIVE_PATH}")  endif() +if(DEFINED LLDB_PYTHON_RUNTIME_LIBRARY_FILENAME) +  target_compile_definitions(lldb PRIVATE LLDB_PYTHON_RUNTIME_LIBRARY_FILENAME="${LLDB_PYTHON_RUNTIME_LIBRARY_FILENAME}") +endif()  if(LLDB_BUILD_FRAMEWORK)    # In the build-tree, we know the exact path to the framework directory. 
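The LLDB_PYTHON_RUNTIME_LIBRARY_FILENAME definition exported here is consumed in the Driver.cpp hunk that follows through a token-pasting trick that turns the narrow string literal into a wide one for SearchPathW. A stripped-down, standalone sketch of that trick (the hard-coded DLL name below is a stand-in for the CMake-provided value):

```cpp
#include <cstdio>
#include <cwchar>

// Turn a narrow string literal macro into a wide (L"...") literal at
// preprocessing time; the extra indirection forces the argument macro to be
// expanded before the L prefix is pasted onto it.
#define WIDEN2(x) L##x
#define WIDEN(x) WIDEN2(x)

// Stand-in for the CMake-provided LLDB_PYTHON_RUNTIME_LIBRARY_FILENAME.
#define PYTHON_DLL_NAME "python312.dll"

int main() {
  const wchar_t *wide = WIDEN(PYTHON_DLL_NAME); // L"python312.dll"
  std::printf("widened name has %zu characters\n", std::wcslen(wide));
  return 0;
}
```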
diff --git a/lldb/tools/driver/Driver.cpp b/lldb/tools/driver/Driver.cpp index 733331f..bebf1a7 100644 --- a/lldb/tools/driver/Driver.cpp +++ b/lldb/tools/driver/Driver.cpp @@ -433,7 +433,8 @@ SBError Driver::ProcessArgs(const opt::InputArgList &args, bool &exiting) {    return error;  } -#if defined(_WIN32) && defined(LLDB_PYTHON_DLL_RELATIVE_PATH) +#ifdef _WIN32 +#ifdef LLDB_PYTHON_DLL_RELATIVE_PATH  /// Returns the full path to the lldb.exe executable.  inline std::wstring GetPathToExecutableW() {    // Iterate until we reach the Windows API maximum path length (32,767). @@ -447,30 +448,73 @@ inline std::wstring GetPathToExecutableW() {    return L"";  } -/// Resolve the full path of the directory defined by +/// \brief Resolve the full path of the directory defined by  /// LLDB_PYTHON_DLL_RELATIVE_PATH. If it exists, add it to the list of DLL  /// search directories. -void AddPythonDLLToSearchPath() { +/// \return `true` if the library was added to the search path. +/// `false` otherwise. +bool AddPythonDLLToSearchPath() {    std::wstring modulePath = GetPathToExecutableW(); -  if (modulePath.empty()) { -    llvm::errs() << "error: unable to find python.dll." << '\n'; -    return; -  } +  if (modulePath.empty()) +    return false;    SmallVector<char, MAX_PATH> utf8Path;    if (sys::windows::UTF16ToUTF8(modulePath.c_str(), modulePath.length(),                                  utf8Path)) -    return; +    return false;    sys::path::remove_filename(utf8Path);    sys::path::append(utf8Path, LLDB_PYTHON_DLL_RELATIVE_PATH);    sys::fs::make_absolute(utf8Path);    SmallVector<wchar_t, 1> widePath;    if (sys::windows::widenPath(utf8Path.data(), widePath)) -    return; +    return false;    if (sys::fs::exists(utf8Path)) -    SetDllDirectoryW(widePath.data()); +    return SetDllDirectoryW(widePath.data()); +  return false; +} +#endif + +#ifdef LLDB_PYTHON_RUNTIME_LIBRARY_FILENAME +/// Returns whether `python3x.dll` is in the DLL search path. +bool IsPythonDLLInPath() { +#define WIDEN2(x) L##x +#define WIDEN(x) WIDEN2(x) +  WCHAR foundPath[MAX_PATH]; +  DWORD result = +      SearchPathW(nullptr, WIDEN(LLDB_PYTHON_RUNTIME_LIBRARY_FILENAME), nullptr, +                  MAX_PATH, foundPath, nullptr); +#undef WIDEN2 +#undef WIDEN + +  return result > 0; +} +#endif + +/// Try to set up the DLL search path for the Python Runtime Library +/// (python3xx.dll). +/// +/// If `LLDB_PYTHON_RUNTIME_LIBRARY_FILENAME` is set, we first check if +/// python3xx.dll is in the search path. If it's not, we try to add it and +/// check for it a second time. +/// If only `LLDB_PYTHON_DLL_RELATIVE_PATH` is set, we try to add python3xx.dll +/// to the search path whether or not it is already in the search path. 
+void SetupPythonRuntimeLibrary() { +#ifdef LLDB_PYTHON_RUNTIME_LIBRARY_FILENAME +  if (IsPythonDLLInPath()) +    return; +#ifdef LLDB_PYTHON_DLL_RELATIVE_PATH +  if (AddPythonDLLToSearchPath() && IsPythonDLLInPath()) +    return; +#endif +  llvm::errs() << "error: unable to find '" +               << LLDB_PYTHON_RUNTIME_LIBRARY_FILENAME << "'.\n"; +  return; +#elif defined(LLDB_PYTHON_DLL_RELATIVE_PATH) +  if (!AddPythonDLLToSearchPath()) +    llvm::errs() << "error: unable to find the Python runtime library.\n"; +#endif  }  #endif @@ -776,8 +820,8 @@ int main(int argc, char const *argv[]) {                          "~/Library/Logs/DiagnosticReports/.\n");  #endif -#if defined(_WIN32) && defined(LLDB_PYTHON_DLL_RELATIVE_PATH) -  AddPythonDLLToSearchPath(); +#ifdef _WIN32 +  SetupPythonRuntimeLibrary();  #endif    // Parse arguments. diff --git a/lldb/tools/lldb-dap/CMakeLists.txt b/lldb/tools/lldb-dap/CMakeLists.txt index dd1bbbd..fa940b7 100644 --- a/lldb/tools/lldb-dap/CMakeLists.txt +++ b/lldb/tools/lldb-dap/CMakeLists.txt @@ -5,6 +5,7 @@ set(LLVM_LINK_COMPONENTS Support)  add_lldb_library(lldbDAP    Breakpoint.cpp    BreakpointBase.cpp +  ClientLauncher.cpp    CommandPlugins.cpp    DAP.cpp    DAPError.cpp diff --git a/lldb/tools/lldb-dap/ClientLauncher.cpp b/lldb/tools/lldb-dap/ClientLauncher.cpp new file mode 100644 index 0000000..4cac1d6 --- /dev/null +++ b/lldb/tools/lldb-dap/ClientLauncher.cpp @@ -0,0 +1,74 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "ClientLauncher.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/ADT/StringSwitch.h" +#include "llvm/Support/FormatVariadic.h" + +using namespace lldb_dap; + +std::optional<ClientLauncher::Client> +ClientLauncher::GetClientFrom(llvm::StringRef str) { +  return llvm::StringSwitch<std::optional<ClientLauncher::Client>>(str.lower()) +      .Case("vscode", ClientLauncher::VSCode) +      .Case("vscode-url", ClientLauncher::VSCodeURL) +      .Default(std::nullopt); +} + +std::unique_ptr<ClientLauncher> +ClientLauncher::GetLauncher(ClientLauncher::Client client) { +  switch (client) { +  case ClientLauncher::VSCode: +    return std::make_unique<VSCodeLauncher>(); +  case ClientLauncher::VSCodeURL: +    return std::make_unique<VSCodeURLPrinter>(); +  } +  return nullptr; +} + +std::string VSCodeLauncher::URLEncode(llvm::StringRef str) { +  std::string out; +  llvm::raw_string_ostream os(out); +  for (char c : str) { +    if (std::isalnum(c) || llvm::StringRef("-_.~").contains(c)) +      os << c; +    else +      os << '%' << llvm::utohexstr(c, false, 2); +  } +  return os.str(); +} + +std::string +VSCodeLauncher::GetLaunchURL(const std::vector<llvm::StringRef> args) const { +  assert(!args.empty() && "empty launch args"); + +  std::vector<std::string> encoded_launch_args; +  for (llvm::StringRef arg : args) +    encoded_launch_args.push_back(URLEncode(arg)); + +  const std::string args_str = llvm::join(encoded_launch_args, "&args="); +  return llvm::formatv( +             "vscode://llvm-vs-code-extensions.lldb-dap/start?program={0}", +             args_str) +      .str(); +} + +llvm::Error VSCodeLauncher::Launch(const std::vector<llvm::StringRef> args) { +  const std::string launch_url = 
GetLaunchURL(args); +  const std::string command = +      llvm::formatv("code --open-url {0}", launch_url).str(); + +  std::system(command.c_str()); +  return llvm::Error::success(); +} + +llvm::Error VSCodeURLPrinter::Launch(const std::vector<llvm::StringRef> args) { +  llvm::outs() << GetLaunchURL(args) << '\n'; +  return llvm::Error::success(); +} diff --git a/lldb/tools/lldb-dap/ClientLauncher.h b/lldb/tools/lldb-dap/ClientLauncher.h new file mode 100644 index 0000000..780b178 --- /dev/null +++ b/lldb/tools/lldb-dap/ClientLauncher.h @@ -0,0 +1,50 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLDB_TOOLS_LLDB_DAP_CLIENTLAUNCHER_H +#define LLDB_TOOLS_LLDB_DAP_CLIENTLAUNCHER_H + +#include "llvm/ADT/StringRef.h" +#include "llvm/Support/Error.h" +#include <vector> + +namespace lldb_dap { + +class ClientLauncher { +public: +  enum Client { +    VSCode, +    VSCodeURL, +  }; + +  virtual ~ClientLauncher() = default; +  virtual llvm::Error Launch(const std::vector<llvm::StringRef> args) = 0; + +  static std::optional<Client> GetClientFrom(llvm::StringRef str); +  static std::unique_ptr<ClientLauncher> GetLauncher(Client client); +}; + +class VSCodeLauncher : public ClientLauncher { +public: +  using ClientLauncher::ClientLauncher; + +  llvm::Error Launch(const std::vector<llvm::StringRef> args) override; + +  std::string GetLaunchURL(const std::vector<llvm::StringRef> args) const; +  static std::string URLEncode(llvm::StringRef str); +}; + +class VSCodeURLPrinter : public VSCodeLauncher { +  using VSCodeLauncher::VSCodeLauncher; + +  llvm::Error Launch(const std::vector<llvm::StringRef> args) override; +}; + +} // namespace lldb_dap + +#endif diff --git a/lldb/tools/lldb-dap/tool/Options.td b/lldb/tools/lldb-dap/tool/Options.td index 5e9dd7a..339a64f 100644 --- a/lldb/tools/lldb-dap/tool/Options.td +++ b/lldb/tools/lldb-dap/tool/Options.td @@ -82,3 +82,11 @@ def connection_timeout: S<"connection-timeout">,      "timeout is reached, the server will be closed and the process will exit. "      "Not specifying this argument or specifying non-positive values will "      "cause the server to wait for new connections indefinitely.">; + +def client +    : S<"client">, +      MetaVarName<"<client>">, +      HelpText< +          "Use lldb-dap as a launcher for a curated number of DAP client.">; + +def REM : R<["--"], "">; diff --git a/lldb/tools/lldb-dap/tool/lldb-dap.cpp b/lldb/tools/lldb-dap/tool/lldb-dap.cpp index 45caa1a..f10ed12 100644 --- a/lldb/tools/lldb-dap/tool/lldb-dap.cpp +++ b/lldb/tools/lldb-dap/tool/lldb-dap.cpp @@ -6,6 +6,7 @@  //  //===----------------------------------------------------------------------===// +#include "ClientLauncher.h"  #include "DAP.h"  #include "DAPLog.h"  #include "EventHelper.h" @@ -141,6 +142,12 @@ EXAMPLES:    debugger to attach to the process.      lldb-dap -g + +  You can also use lldb-dap to launch a supported client, for example the +  LLDB-DAP Visual Studio Code extension. 
+ +    lldb-dap --client vscode -- /path/to/binary <args> +  )___";  } @@ -150,6 +157,29 @@ static void PrintVersion() {    llvm::outs() << "liblldb: " << lldb::SBDebugger::GetVersionString() << '\n';  } +static llvm::Error LaunchClient(const llvm::opt::InputArgList &args) { +  auto *client_arg = args.getLastArg(OPT_client); +  assert(client_arg && "must have client arg"); + +  std::optional<ClientLauncher::Client> client = +      ClientLauncher::GetClientFrom(client_arg->getValue()); +  if (!client) +    return llvm::createStringError( +        llvm::formatv("unsupported client: {0}", client_arg->getValue())); + +  std::vector<llvm::StringRef> launch_args; +  if (auto *arg = args.getLastArgNoClaim(OPT_REM)) { +    for (auto *value : arg->getValues()) { +      launch_args.push_back(value); +    } +  } + +  if (launch_args.empty()) +    return llvm::createStringError("no launch arguments provided"); + +  return ClientLauncher::GetLauncher(*client)->Launch(launch_args); +} +  #if not defined(_WIN32)  struct FDGroup {    int GetFlags() const { @@ -541,6 +571,14 @@ int main(int argc, char *argv[]) {      return EXIT_SUCCESS;    } +  if (input_args.hasArg(OPT_client)) { +    if (llvm::Error error = LaunchClient(input_args)) { +      llvm::WithColor::error() << llvm::toString(std::move(error)) << '\n'; +      return EXIT_FAILURE; +    } +    return EXIT_SUCCESS; +  } +    ReplMode default_repl_mode = ReplMode::Auto;    if (input_args.hasArg(OPT_repl_mode)) {      llvm::opt::Arg *repl_mode = input_args.getLastArg(OPT_repl_mode); diff --git a/lldb/unittests/DAP/CMakeLists.txt b/lldb/unittests/DAP/CMakeLists.txt index a08414c..b1fdef1 100644 --- a/lldb/unittests/DAP/CMakeLists.txt +++ b/lldb/unittests/DAP/CMakeLists.txt @@ -1,4 +1,5 @@  add_lldb_unittest(DAPTests +  ClientLauncherTest.cpp    DAPErrorTest.cpp    DAPTest.cpp    DAPTypesTest.cpp diff --git a/lldb/unittests/DAP/ClientLauncherTest.cpp b/lldb/unittests/DAP/ClientLauncherTest.cpp new file mode 100644 index 0000000..dbaf9ee --- /dev/null +++ b/lldb/unittests/DAP/ClientLauncherTest.cpp @@ -0,0 +1,71 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "ClientLauncher.h" +#include "llvm/ADT/StringRef.h" +#include "gtest/gtest.h" +#include <optional> + +using namespace lldb_dap; +using namespace llvm; + +TEST(ClientLauncherTest, GetClientFromVSCode) { +  std::optional<ClientLauncher::Client> result = +      ClientLauncher::GetClientFrom("vscode"); +  ASSERT_TRUE(result.has_value()); +  EXPECT_EQ(ClientLauncher::VSCode, result.value()); +} + +TEST(ClientLauncherTest, GetClientFromVSCodeUpperCase) { +  std::optional<ClientLauncher::Client> result = +      ClientLauncher::GetClientFrom("VSCODE"); +  ASSERT_TRUE(result.has_value()); +  EXPECT_EQ(ClientLauncher::VSCode, result.value()); +} + +TEST(ClientLauncherTest, GetClientFromVSCodeMixedCase) { +  std::optional<ClientLauncher::Client> result = +      ClientLauncher::GetClientFrom("VSCode"); +  ASSERT_TRUE(result.has_value()); +  EXPECT_EQ(ClientLauncher::VSCode, result.value()); +} + +TEST(ClientLauncherTest, GetClientFromInvalidString) { +  std::optional<ClientLauncher::Client> result = +      ClientLauncher::GetClientFrom("invalid"); +  EXPECT_FALSE(result.has_value()); +} + +TEST(ClientLauncherTest, GetClientFromEmptyString) { +  std::optional<ClientLauncher::Client> result = +      ClientLauncher::GetClientFrom(""); +  EXPECT_FALSE(result.has_value()); +} + +TEST(ClientLauncherTest, URLEncode) { +  EXPECT_EQ("", VSCodeLauncher::URLEncode("")); +  EXPECT_EQ( +      "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_.~", +      VSCodeLauncher::URLEncode("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRST" +                                "UVWXYZ0123456789-_.~")); +  EXPECT_EQ("hello%20world", VSCodeLauncher::URLEncode("hello world")); +  EXPECT_EQ("hello%21%40%23%24", VSCodeLauncher::URLEncode("hello!@#$")); +  EXPECT_EQ("%2Fpath%2Fto%2Ffile", VSCodeLauncher::URLEncode("/path/to/file")); +  EXPECT_EQ("key%3Dvalue%26key2%3Dvalue2", +            VSCodeLauncher::URLEncode("key=value&key2=value2")); +  EXPECT_EQ("100%25complete", VSCodeLauncher::URLEncode("100%complete")); +  EXPECT_EQ("file_name%20with%20spaces%20%26%20special%21.txt", +            VSCodeLauncher::URLEncode("file_name with spaces & special!.txt")); +  EXPECT_EQ("%00%01%02", +            VSCodeLauncher::URLEncode(llvm::StringRef("\x00\x01\x02", 3))); +  EXPECT_EQ("test-file_name.txt~", +            VSCodeLauncher::URLEncode("test-file_name.txt~")); + +  // UTF-8 encoded characters should be percent-encoded byte by byte. +  EXPECT_EQ("%C3%A9", VSCodeLauncher::URLEncode("é")); +} diff --git a/llvm/docs/CommandGuide/dsymutil.rst b/llvm/docs/CommandGuide/dsymutil.rst index 8e61e01..0e442d6 100644 --- a/llvm/docs/CommandGuide/dsymutil.rst +++ b/llvm/docs/CommandGuide/dsymutil.rst @@ -70,6 +70,14 @@ OPTIONS   Print this help output. +.. option:: --include-swiftmodules-from-interface + + Whether or not to copy binary swiftmodules built from textual .swiftinterface + files into the dSYM bundle. These typically come only from the SDK (since + textual interfaces require library evolution) and thus are a waste of space to + copy into the bundle. Turn this on if the swiftmodules are different from + those in the SDK. +  .. 
option:: --keep-function-for-static   Make a static variable keep the enclosing function even if it would have been diff --git a/llvm/docs/RISCVUsage.rst b/llvm/docs/RISCVUsage.rst index 49184e3..d03f383 100644 --- a/llvm/docs/RISCVUsage.rst +++ b/llvm/docs/RISCVUsage.rst @@ -406,6 +406,12 @@ The current vendor extensions supported are:  ``XSfvcp``    LLVM implements `version 1.1.0 of the SiFive Vector Coprocessor Interface (VCIX) Software Specification <https://sifive.cdn.prismic.io/sifive/Zn3m1R5LeNNTwnLS_vcix-spec-software-v1p1.pdf>`__ by SiFive.  All instructions are prefixed with `sf.vc.` as described in the specification, and the riscv-toolchain-convention document linked above. +``Xsfvfexp16e``, ``Xsfvfbfexp16e``, and ``Xsfvfexp32e`` +  LLVM implements `version 0.5 of the Vector Exponential Extension Specification <https://www.sifive.com/document-file/exponential-function-instruction-xsfvfexp32e-xsfvf>`__ by SiFive. All instructions are prefixed with `sf.` as described in the specification linked above. + +``Xsfvfexpa`` and ``Xsfvfexpa64e`` +  LLVM implements `version 0.2 of the Vector Exponential Approximation Extension Specification <https://www.sifive.com/document-file/exponential-approximation-instruction-xsfvfexpa-ex>`__ by SiFive. All instructions are prefixed with `sf.` as described in the specification linked above. +  ``XSfvqmaccdod``, ``XSfvqmaccqoq``    LLVM implements `version 1.1.0 of the SiFive Int8 Matrix Multiplication Extensions Specification <https://sifive.cdn.prismic.io/sifive/1a2ad85b-d818-49f7-ba83-f51f1731edbe_int8-matmul-spec.pdf>`__ by SiFive.  All instructions are prefixed with `sf.` as described in the specification linked above. diff --git a/llvm/include/llvm/Transforms/IPO/Attributor.h b/llvm/include/llvm/Transforms/IPO/Attributor.h index a013f27..8c0342ae 100644 --- a/llvm/include/llvm/Transforms/IPO/Attributor.h +++ b/llvm/include/llvm/Transforms/IPO/Attributor.h @@ -5339,6 +5339,17 @@ struct AAPotentialConstantValues      return nullptr;    } +  /// Return the minimum trailing zeros of potential constants +  unsigned getAssumedMinTrailingZeros() const { +    unsigned TrailingZeros = getAssumedSet().begin()->getBitWidth() + 1; +    for (const APInt &It : getAssumedSet()) { +      if (It.countTrailingZeros() < TrailingZeros) +        TrailingZeros = It.countTrailingZeros(); +    } +    if (TrailingZeros > getAssumedSet().begin()->getBitWidth()) +      return 0; +    return TrailingZeros; +  }    /// See AbstractAttribute::getName()    StringRef getName() const override { return "AAPotentialConstantValues"; } diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp index 4b4df98..637acd6 100644 --- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp +++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp @@ -109,8 +109,10 @@ MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,    if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {      if (CI->getBitWidth() > 64)        MIB.addCImm(CI); -    else +    else if (CI->getBitWidth() == 1)        MIB.addImm(CI->getZExtValue()); +    else +      MIB.addImm(CI->getSExtValue());    } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {      MIB.addFPImm(CFP);    } else if (isa<ConstantPointerNull>(NumericConstant)) { diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp index bb10cf6..d84c3fb 100644 --- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp +++ 
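The MachineIRBuilder change above and the InstrEmitter change that follows apply the same fix: a 1-bit ConstantInt has to be zero-extended when it is materialized as a 64-bit debug-value immediate, otherwise a boolean `true` is displayed as -1. A minimal standalone illustration with llvm::APInt (illustrative only):

```cpp
#include "llvm/ADT/APInt.h"
#include <cassert>

int main() {
  llvm::APInt True(/*numBits=*/1, /*val=*/1);
  assert(True.getSExtValue() == -1); // sign-extending 0b1 gives -1
  assert(True.getZExtValue() == 1);  // zero-extending keeps the boolean value

  // For anything wider than one bit, sign extension is still the right call.
  llvm::APInt MinusOne(/*numBits=*/8, /*val=*/0xFF);
  assert(MinusOne.getSExtValue() == -1);
  return 0;
}
```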
b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp @@ -733,6 +733,8 @@ MachineOperand GetMOForConstDbgOp(const SDDbgOperand &Op) {    if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {      if (CI->getBitWidth() > 64)        return MachineOperand::CreateCImm(CI); +    if (CI->getBitWidth() == 1) +      return MachineOperand::CreateImm(CI->getZExtValue());      return MachineOperand::CreateImm(CI->getSExtValue());    }    if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index a522650..fa0c899 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -8958,9 +8958,8 @@ bool SelectionDAGBuilder::canTailCall(const CallBase &CB) const {    // Avoid emitting tail calls in functions with the disable-tail-calls    // attribute.    const Function *Caller = CB.getParent()->getParent(); -  if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() == -          "true" && -      !isMustTailCall) +  if (!isMustTailCall && +      Caller->getFnAttribute("disable-tail-calls").getValueAsBool())      return false;    // We can't tail call inside a function with a swifterror argument. Lowering diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp index b775cbb..95d61a9 100644 --- a/llvm/lib/IR/Value.cpp +++ b/llvm/lib/IR/Value.cpp @@ -148,18 +148,10 @@ void Value::destroyValueName() {  }  bool Value::hasNUses(unsigned N) const { -  if (!UseList) -    return N == 0; - -  // TODO: Disallow for ConstantData and remove !UseList check?    return hasNItems(use_begin(), use_end(), N);  }  bool Value::hasNUsesOrMore(unsigned N) const { -  // TODO: Disallow for ConstantData and remove !UseList check? -  if (!UseList) -    return N == 0; -    return hasNItemsOrMore(use_begin(), use_end(), N);  } diff --git a/llvm/lib/Passes/PassBuilderPipelines.cpp b/llvm/lib/Passes/PassBuilderPipelines.cpp index bd03ac0..3f41618 100644 --- a/llvm/lib/Passes/PassBuilderPipelines.cpp +++ b/llvm/lib/Passes/PassBuilderPipelines.cpp @@ -228,7 +228,7 @@ static cl::opt<bool> EnableLoopHeaderDuplication(  static cl::opt<bool>      EnableDFAJumpThreading("enable-dfa-jump-thread",                             cl::desc("Enable DFA jump threading"), -                           cl::init(false), cl::Hidden); +                           cl::init(true), cl::Hidden);  static cl::opt<bool>      EnableHotColdSplit("hot-cold-split", diff --git a/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp b/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp index 1169f26..97298f9 100644 --- a/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp @@ -655,16 +655,10 @@ Function *AArch64Arm64ECCallLowering::buildGuestExitThunk(Function *F) {    BasicBlock *BB = BasicBlock::Create(M->getContext(), "", GuestExit);    IRBuilder<> B(BB); -  // Load the global symbol as a pointer to the check function. -  Value *GuardFn; -  if (cfguard_module_flag == 2 && !F->hasFnAttribute("guard_nocf")) -    GuardFn = GuardFnCFGlobal; -  else -    GuardFn = GuardFnGlobal; -  LoadInst *GuardCheckLoad = B.CreateLoad(PtrTy, GuardFn); - -  // Create new call instruction. The CFGuard check should always be a call, -  // even if the original CallBase is an Invoke or CallBr instruction. +  // Create new call instruction. 
The call to the check function should always be a call, +  // even if the original CallBase is an Invoke or CallBr instruction. +  // This is treated as a direct call, so do not use GuardFnCFGlobal. +  LoadInst *GuardCheckLoad = B.CreateLoad(PtrTy, GuardFnGlobal);    Function *Thunk = buildExitThunk(F->getFunctionType(), F->getAttributes());    CallInst *GuardCheck = B.CreateCall(        GuardFnType, GuardCheckLoad, {F, Thunk}); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUnifyDivergentExitNodes.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUnifyDivergentExitNodes.cpp index ddf9a24..fe81a5e 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUUnifyDivergentExitNodes.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUUnifyDivergentExitNodes.cpp @@ -181,14 +181,52 @@ BasicBlock *AMDGPUUnifyDivergentExitNodesImpl::unifyReturnBlockSet(    return NewRetBlock;  } +static BasicBlock * +createDummyReturnBlock(Function &F, +                       SmallVector<BasicBlock *, 4> &ReturningBlocks) { +  BasicBlock *DummyReturnBB = +      BasicBlock::Create(F.getContext(), "DummyReturnBlock", &F); +  Type *RetTy = F.getReturnType(); +  Value *RetVal = RetTy->isVoidTy() ? nullptr : PoisonValue::get(RetTy); +  ReturnInst::Create(F.getContext(), RetVal, DummyReturnBB); +  ReturningBlocks.push_back(DummyReturnBB); +  return DummyReturnBB; +} + +/// Handle conditional branch instructions (-> 2 targets) and callbr +/// instructions with N targets. +static void handleNBranch(Function &F, BasicBlock *BB, Instruction *BI, +                          BasicBlock *DummyReturnBB, +                          std::vector<DominatorTree::UpdateType> &Updates) { +  SmallVector<BasicBlock *, 2> Successors(successors(BB)); + +  // Create a new transition block to hold the conditional branch. +  BasicBlock *TransitionBB = BB->splitBasicBlock(BI, "TransitionBlock"); + +  Updates.reserve(Updates.size() + 2 * Successors.size() + 2); + +  // 'Successors' become successors of TransitionBB instead of BB, +  // and TransitionBB becomes a single successor of BB. +  Updates.emplace_back(DominatorTree::Insert, BB, TransitionBB); +  for (BasicBlock *Successor : Successors) { +    Updates.emplace_back(DominatorTree::Insert, TransitionBB, Successor); +    Updates.emplace_back(DominatorTree::Delete, BB, Successor); +  } + +  // Create a branch that will always branch to the transition block and +  // references DummyReturnBB. 
+  BB->getTerminator()->eraseFromParent(); +  BranchInst::Create(TransitionBB, DummyReturnBB, +                     ConstantInt::getTrue(F.getContext()), BB); +  Updates.emplace_back(DominatorTree::Insert, BB, DummyReturnBB); +} +  bool AMDGPUUnifyDivergentExitNodesImpl::run(Function &F, DominatorTree *DT,                                              const PostDominatorTree &PDT,                                              const UniformityInfo &UA) { -  assert(hasOnlySimpleTerminator(F) && "Unsupported block terminator."); -    if (PDT.root_size() == 0 ||        (PDT.root_size() == 1 && -       !isa<BranchInst>(PDT.getRoot()->getTerminator()))) +       !isa<BranchInst, CallBrInst>(PDT.getRoot()->getTerminator())))      return false;    // Loop over all of the blocks in a function, tracking all of the blocks that @@ -222,46 +260,28 @@ bool AMDGPUUnifyDivergentExitNodesImpl::run(Function &F, DominatorTree *DT,        if (HasDivergentExitBlock)          UnreachableBlocks.push_back(BB);      } else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) { - -      ConstantInt *BoolTrue = ConstantInt::getTrue(F.getContext()); -      if (DummyReturnBB == nullptr) { -        DummyReturnBB = -            BasicBlock::Create(F.getContext(), "DummyReturnBlock", &F); -        Type *RetTy = F.getReturnType(); -        Value *RetVal = RetTy->isVoidTy() ? nullptr : PoisonValue::get(RetTy); -        ReturnInst::Create(F.getContext(), RetVal, DummyReturnBB); -        ReturningBlocks.push_back(DummyReturnBB); -      } +      if (!DummyReturnBB) +        DummyReturnBB = createDummyReturnBlock(F, ReturningBlocks);        if (BI->isUnconditional()) {          BasicBlock *LoopHeaderBB = BI->getSuccessor(0);          BI->eraseFromParent(); // Delete the unconditional branch.          // Add a new conditional branch with a dummy edge to the return block. -        BranchInst::Create(LoopHeaderBB, DummyReturnBB, BoolTrue, BB); -        Updates.emplace_back(DominatorTree::Insert, BB, DummyReturnBB); -      } else { // Conditional branch. -        SmallVector<BasicBlock *, 2> Successors(successors(BB)); - -        // Create a new transition block to hold the conditional branch. -        BasicBlock *TransitionBB = BB->splitBasicBlock(BI, "TransitionBlock"); - -        Updates.reserve(Updates.size() + 2 * Successors.size() + 2); - -        // 'Successors' become successors of TransitionBB instead of BB, -        // and TransitionBB becomes a single successor of BB. -        Updates.emplace_back(DominatorTree::Insert, BB, TransitionBB); -        for (BasicBlock *Successor : Successors) { -          Updates.emplace_back(DominatorTree::Insert, TransitionBB, Successor); -          Updates.emplace_back(DominatorTree::Delete, BB, Successor); -        } - -        // Create a branch that will always branch to the transition block and -        // references DummyReturnBB. 
-        BB->getTerminator()->eraseFromParent(); -        BranchInst::Create(TransitionBB, DummyReturnBB, BoolTrue, BB); +        BranchInst::Create(LoopHeaderBB, DummyReturnBB, +                           ConstantInt::getTrue(F.getContext()), BB);          Updates.emplace_back(DominatorTree::Insert, BB, DummyReturnBB); +      } else { +        handleNBranch(F, BB, BI, DummyReturnBB, Updates);        }        Changed = true; +    } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(BB->getTerminator())) { +      if (!DummyReturnBB) +        DummyReturnBB = createDummyReturnBlock(F, ReturningBlocks); + +      handleNBranch(F, BB, CBI, DummyReturnBB, Updates); +      Changed = true; +    } else { +      llvm_unreachable("unsupported block terminator");      }    } diff --git a/llvm/lib/Target/BPF/BPFAsmPrinter.cpp b/llvm/lib/Target/BPF/BPFAsmPrinter.cpp index 77dc4a7..b2a8204 100644 --- a/llvm/lib/Target/BPF/BPFAsmPrinter.cpp +++ b/llvm/lib/Target/BPF/BPFAsmPrinter.cpp @@ -88,6 +88,16 @@ bool BPFAsmPrinter::doFinalization(Module &M) {      }    } +  for (GlobalObject &GO : M.global_objects()) { +    if (!GO.hasExternalWeakLinkage()) +      continue; + +    if (!SawTrapCall && GO.getName() == BPF_TRAP) { +      GO.eraseFromParent(); +      break; +    } +  } +    return AsmPrinter::doFinalization(M);  } @@ -160,6 +170,20 @@ bool BPFAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,  }  void BPFAsmPrinter::emitInstruction(const MachineInstr *MI) { +  if (MI->isCall()) { +    for (const MachineOperand &Op : MI->operands()) { +      if (Op.isGlobal()) { +        if (const GlobalValue *GV = Op.getGlobal()) +          if (GV->getName() == BPF_TRAP) +            SawTrapCall = true; +      } else if (Op.isSymbol()) { +        if (const MCSymbol *Sym = Op.getMCSymbol()) +          if (Sym->getName() == BPF_TRAP) +            SawTrapCall = true; +      } +    } +  } +    BPF_MC::verifyInstructionPredicates(MI->getOpcode(),                                        getSubtargetInfo().getFeatureBits()); diff --git a/llvm/lib/Target/BPF/BPFAsmPrinter.h b/llvm/lib/Target/BPF/BPFAsmPrinter.h index 90ef207..75a1d7e 100644 --- a/llvm/lib/Target/BPF/BPFAsmPrinter.h +++ b/llvm/lib/Target/BPF/BPFAsmPrinter.h @@ -39,6 +39,7 @@ public:  private:    BTFDebug *BTF;    TargetMachine &TM; +  bool SawTrapCall = false;    const BPFTargetMachine &getBTM() const;  }; diff --git a/llvm/lib/Target/DirectX/DXContainerGlobals.cpp b/llvm/lib/Target/DirectX/DXContainerGlobals.cpp index 8ace2d2..eb4c884 100644 --- a/llvm/lib/Target/DirectX/DXContainerGlobals.cpp +++ b/llvm/lib/Target/DirectX/DXContainerGlobals.cpp @@ -194,9 +194,10 @@ void DXContainerGlobals::addResourcesForPSV(Module &M, PSVRuntimeInfo &PSV) {          dxbc::PSV::v2::ResourceBindInfo BindInfo;          BindInfo.Type = Type;          BindInfo.LowerBound = Binding.LowerBound; -        assert(Binding.Size == UINT32_MAX || -               (uint64_t)Binding.LowerBound + Binding.Size - 1 <= UINT32_MAX && -                   "Resource range is too large"); +        assert( +            (Binding.Size == UINT32_MAX || +             (uint64_t)Binding.LowerBound + Binding.Size - 1 <= UINT32_MAX) && +            "Resource range is too large");          BindInfo.UpperBound = (Binding.Size == UINT32_MAX)                                    ? 
UINT32_MAX                                    : Binding.LowerBound + Binding.Size - 1; diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp index c667a09..996d653 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp @@ -1836,7 +1836,7 @@ bool NVPTXDAGToDAGISel::tryFence(SDNode *N) {    return true;  } -NVPTXScopes::NVPTXScopes(LLVMContext &C) { +NVPTXScopes::NVPTXScopes(LLVMContext &C) : Context(&C) {    Scopes[C.getOrInsertSyncScopeID("singlethread")] = NVPTX::Scope::Thread;    Scopes[C.getOrInsertSyncScopeID("")] = NVPTX::Scope::System;    Scopes[C.getOrInsertSyncScopeID("block")] = NVPTX::Scope::Block; @@ -1851,11 +1851,21 @@ NVPTX::Scope NVPTXScopes::operator[](SyncScope::ID ID) const {    auto S = Scopes.find(ID);    if (S == Scopes.end()) { -    // TODO: -    // - Add API to LLVMContext to get the name of a single scope. -    // - Use that API here to print an error containing the name -    //   of this Unknown ID. -    report_fatal_error(formatv("Could not find scope ID={}.", int(ID))); +    auto scopeName = Context->getSyncScopeName(ID); +    assert(scopeName.has_value() && "Scope name must exist."); + +    // Build list of supported syncscopes programmatically +    SmallVector<StringRef> supportedScopes; +    for (const auto &Entry : Scopes) { +      if (auto name = Context->getSyncScopeName(Entry.first)) +        supportedScopes.push_back(name->empty() ? "<empty string>" : *name); +    } + +    reportFatalUsageError( +        formatv("NVPTX backend does not support syncscope \"{0}\" (ID={1}).\n" +                "Supported syncscopes are: {2}.", +                scopeName.value(), int(ID), +                make_range(supportedScopes.begin(), supportedScopes.end())));    }    return S->second;  } diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h index 1cb579b..d525531 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h +++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.h @@ -35,6 +35,7 @@ struct NVPTXScopes {  private:    SmallMapVector<SyncScope::ID, NVPTX::Scope, 8> Scopes{}; +  LLVMContext *Context = nullptr;  };  class LLVM_LIBRARY_VISIBILITY NVPTXDAGToDAGISel : public SelectionDAGISel { diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp index 282cf5d..3d5a55c 100644 --- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp +++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp @@ -95,7 +95,8 @@ private:    void addVectorLoadStoreOperands(MachineInstr &I,                                    SmallVectorImpl<SrcOp> &SrcOps,                                    unsigned &CurOp, bool IsMasked, -                                  bool IsStrided) const; +                                  bool IsStridedOrIndexed, +                                  LLT *IndexVT = nullptr) const;    bool selectIntrinsicWithSideEffects(MachineInstr &I,                                        MachineIRBuilder &MIB) const; @@ -722,15 +723,17 @@ static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {  void RISCVInstructionSelector::addVectorLoadStoreOperands(      MachineInstr &I, SmallVectorImpl<SrcOp> &SrcOps, unsigned &CurOp, -    bool IsMasked, bool IsStrided) const { +    bool IsMasked, bool IsStridedOrIndexed, LLT *IndexVT) const {    // Base Pointer    auto PtrReg = I.getOperand(CurOp++).getReg();    SrcOps.push_back(PtrReg); -  // Stride -  
if (IsStrided) { +  // Stride or Index +  if (IsStridedOrIndexed) {      auto StrideReg = I.getOperand(CurOp++).getReg();      SrcOps.push_back(StrideReg); +    if (IndexVT) +      *IndexVT = MRI->getType(StrideReg);    }    // Mask @@ -805,6 +808,70 @@ bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(      I.eraseFromParent();      return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);    } +  case Intrinsic::riscv_vloxei: +  case Intrinsic::riscv_vloxei_mask: +  case Intrinsic::riscv_vluxei: +  case Intrinsic::riscv_vluxei_mask: { +    bool IsMasked = IntrinID == Intrinsic::riscv_vloxei_mask || +                    IntrinID == Intrinsic::riscv_vluxei_mask; +    bool IsOrdered = IntrinID == Intrinsic::riscv_vloxei || +                     IntrinID == Intrinsic::riscv_vloxei_mask; +    LLT VT = MRI->getType(I.getOperand(0).getReg()); +    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits()); + +    // Result vector +    const Register DstReg = I.getOperand(0).getReg(); + +    // Sources +    bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm; +    unsigned CurOp = 2; +    SmallVector<SrcOp, 4> SrcOps; // Source registers. + +    // Passthru +    if (HasPassthruOperand) { +      auto PassthruReg = I.getOperand(CurOp++).getReg(); +      SrcOps.push_back(PassthruReg); +    } else { +      // Use NoRegister if there is no specified passthru. +      SrcOps.push_back(Register()); +    } +    LLT IndexVT; +    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT); + +    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT)); +    RISCVVType::VLMUL IndexLMUL = +        RISCVTargetLowering::getLMUL(getMVTForLLT(IndexVT)); +    unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits()); +    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) { +      reportFatalUsageError("The V extension does not support EEW=64 for index " +                            "values when XLEN=32"); +    } +    const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo( +        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL), +        static_cast<unsigned>(IndexLMUL)); + +    auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps); + +    // Select VL +    auto VLOpFn = renderVLOp(I.getOperand(CurOp++)); +    for (auto &RenderFn : *VLOpFn) +      RenderFn(PseudoMI); + +    // SEW +    PseudoMI.addImm(Log2SEW); + +    // Policy +    uint64_t Policy = RISCVVType::MASK_AGNOSTIC; +    if (IsMasked) +      Policy = I.getOperand(CurOp++).getImm(); +    PseudoMI.addImm(Policy); + +    // Memref +    PseudoMI.cloneMemRefs(I); + +    I.eraseFromParent(); +    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI); +  }    case Intrinsic::riscv_vsm:    case Intrinsic::riscv_vse:    case Intrinsic::riscv_vse_mask: @@ -847,6 +914,56 @@ bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(      I.eraseFromParent();      return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);    } +  case Intrinsic::riscv_vsoxei: +  case Intrinsic::riscv_vsoxei_mask: +  case Intrinsic::riscv_vsuxei: +  case Intrinsic::riscv_vsuxei_mask: { +    bool IsMasked = IntrinID == Intrinsic::riscv_vsoxei_mask || +                    IntrinID == Intrinsic::riscv_vsuxei_mask; +    bool IsOrdered = IntrinID == Intrinsic::riscv_vsoxei || +                     IntrinID == Intrinsic::riscv_vsoxei_mask; +    LLT VT = MRI->getType(I.getOperand(1).getReg()); +    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits()); + +    // Sources +    unsigned CurOp = 1; +  
  SmallVector<SrcOp, 4> SrcOps; // Source registers. + +    // Store value +    auto PassthruReg = I.getOperand(CurOp++).getReg(); +    SrcOps.push_back(PassthruReg); + +    LLT IndexVT; +    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT); + +    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT)); +    RISCVVType::VLMUL IndexLMUL = +        RISCVTargetLowering::getLMUL(getMVTForLLT(IndexVT)); +    unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits()); +    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) { +      reportFatalUsageError("The V extension does not support EEW=64 for index " +                            "values when XLEN=32"); +    } +    const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo( +        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL), +        static_cast<unsigned>(IndexLMUL)); + +    auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps); + +    // Select VL +    auto VLOpFn = renderVLOp(I.getOperand(CurOp++)); +    for (auto &RenderFn : *VLOpFn) +      RenderFn(PseudoMI); + +    // SEW +    PseudoMI.addImm(Log2SEW); + +    // Memref +    PseudoMI.cloneMemRefs(I); + +    I.eraseFromParent(); +    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI); +  }    }  } diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h index e75dfe3..5b8cfb2 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h @@ -407,7 +407,6 @@ enum OperandType : unsigned {    OPERAND_SIMM5_PLUS1,    OPERAND_SIMM6,    OPERAND_SIMM6_NONZERO, -  OPERAND_SIMM8,    OPERAND_SIMM8_UNSIGNED,    OPERAND_SIMM10,    OPERAND_SIMM10_LSB0000_NONZERO, diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp index b25a054..9078335 100644 --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -371,8 +371,8 @@ void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, unsigned NF, bool IsMasked,    RISCVVType::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);    unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) { -    report_fatal_error("The V extension does not support EEW=64 for index " -                       "values when XLEN=32"); +    reportFatalUsageError("The V extension does not support EEW=64 for index " +                          "values when XLEN=32");    }    const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(        NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL), @@ -444,8 +444,8 @@ void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, unsigned NF, bool IsMasked,    RISCVVType::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);    unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) { -    report_fatal_error("The V extension does not support EEW=64 for index " -                       "values when XLEN=32"); +    reportFatalUsageError("The V extension does not support EEW=64 for index " +                          "values when XLEN=32");    }    const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(        NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL), @@ -2223,8 +2223,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {        RISCVVType::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);        unsigned IndexLog2EEW = 
Log2_32(IndexVT.getScalarSizeInBits());        if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) { -        report_fatal_error("The V extension does not support EEW=64 for index " -                           "values when XLEN=32"); +        reportFatalUsageError("The V extension does not support EEW=64 for " +                              "index values when XLEN=32");        }        const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(            IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL), @@ -2457,8 +2457,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {        RISCVVType::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);        unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());        if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) { -        report_fatal_error("The V extension does not support EEW=64 for index " -                           "values when XLEN=32"); +        reportFatalUsageError("The V extension does not support EEW=64 for " +                              "index values when XLEN=32");        }        const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(            IsMasked, IsOrdered, IndexLog2EEW, diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td index c31713e..1c6a5af 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td @@ -90,6 +90,7 @@ defvar ZfhminDExts = [ZfhminDExt, ZhinxminZdinxExt, ZhinxminZdinx32Ext];  //===----------------------------------------------------------------------===//  let Predicates = [HasHalfFPLoadStoreMove] in { +let canFoldAsLoad = 1 in  def FLH : FPLoad_r<0b001, "flh", FPR16, WriteFLD16>;  // Operands for stores are in the order srcreg, base, offset rather than diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 2970cf4..b97b508 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -33034,12 +33034,13 @@ static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,        DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));    Type *RetTy = isF64 ? 
(Type *)StructType::get(ArgTy, ArgTy) -                      : (Type *)FixedVectorType::get(ArgTy, 4); +                      : (Type *)FixedVectorType::get(ArgTy, 2);    TargetLowering::CallLoweringInfo CLI(DAG);    CLI.setDebugLoc(dl)        .setChain(DAG.getEntryNode()) -      .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args)); +      .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args)) +      .setIsPostTypeLegalization();    std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI); diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp index 5ed47ae..a6ac761 100644 --- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp +++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp @@ -5185,6 +5185,7 @@ struct AADereferenceableCallSiteReturned final  // ------------------------ Align Argument Attribute ------------------------  namespace { +  static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,                                      Value &AssociatedValue, const Use *U,                                      const Instruction *I, bool &TrackUse) { @@ -5200,6 +5201,28 @@ static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,        TrackUse = true;      return 0;    } +  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) +    switch (II->getIntrinsicID()) { +    case Intrinsic::ptrmask: { +      // Is it appropriate to pull attribute in initialization? +      const auto *ConstVals = A.getAAFor<AAPotentialConstantValues>( +          QueryingAA, IRPosition::value(*II->getOperand(1)), DepClassTy::NONE); +      const auto *AlignAA = A.getAAFor<AAAlign>( +          QueryingAA, IRPosition::value(*II), DepClassTy::NONE); +      if (ConstVals && ConstVals->isValidState() && ConstVals->isAtFixpoint()) { +        unsigned ShiftValue = std::min(ConstVals->getAssumedMinTrailingZeros(), +                                       Value::MaxAlignmentExponent); +        Align ConstAlign(UINT64_C(1) << ShiftValue); +        if (ConstAlign >= AlignAA->getKnownAlign()) +          return Align(1).value(); +      } +      if (AlignAA) +        return AlignAA->getKnownAlign().value(); +      break; +    } +    default: +      break; +    }    MaybeAlign MA;    if (const auto *CB = dyn_cast<CallBase>(I)) { @@ -5499,6 +5522,44 @@ struct AAAlignCallSiteReturned final    AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)        : Base(IRP, A) {} +  ChangeStatus updateImpl(Attributor &A) override { +    Instruction *I = getIRPosition().getCtxI(); +    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { +      switch (II->getIntrinsicID()) { +      case Intrinsic::ptrmask: { +        Align Alignment; +        bool Valid = false; + +        const auto *ConstVals = A.getAAFor<AAPotentialConstantValues>( +            *this, IRPosition::value(*II->getOperand(1)), DepClassTy::REQUIRED); +        if (ConstVals && ConstVals->isValidState()) { +          unsigned ShiftValue = +              std::min(ConstVals->getAssumedMinTrailingZeros(), +                       Value::MaxAlignmentExponent); +          Alignment = Align(UINT64_C(1) << ShiftValue); +          Valid = true; +        } + +        const auto *AlignAA = +            A.getAAFor<AAAlign>(*this, IRPosition::value(*(II->getOperand(0))), +                                DepClassTy::REQUIRED); +        if (AlignAA && AlignAA->isValidState()) { +          Alignment = std::max(AlignAA->getAssumedAlign(), Alignment); +          Valid = true; 
+        } + +        if (Valid) +          return clampStateAndIndicateChange<StateType>( +              this->getState(), +              std::min(this->getAssumedAlign(), Alignment).value()); +        break; +      } +      default: +        break; +      } +    } +    return Base::updateImpl(A); +  };    /// See AbstractAttribute::trackStatistics()    void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }  }; diff --git a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp index bb6c879..239526e 100644 --- a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp +++ b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp @@ -337,7 +337,7 @@ static void buildPartialUnswitchConditionalBranch(  static void buildPartialInvariantUnswitchConditionalBranch(      BasicBlock &BB, ArrayRef<Value *> ToDuplicate, bool Direction,      BasicBlock &UnswitchedSucc, BasicBlock &NormalSucc, Loop &L, -    MemorySSAUpdater *MSSAU) { +    MemorySSAUpdater *MSSAU, const BranchInst &OriginalBranch) {    ValueToValueMapTy VMap;    for (auto *Val : reverse(ToDuplicate)) {      Instruction *Inst = cast<Instruction>(Val); @@ -377,8 +377,19 @@    IRBuilder<> IRB(&BB);    IRB.SetCurrentDebugLocation(DebugLoc::getCompilerGenerated());    Value *Cond = VMap[ToDuplicate[0]]; -  IRB.CreateCondBr(Cond, Direction ? &UnswitchedSucc : &NormalSucc, -                   Direction ? &NormalSucc : &UnswitchedSucc); +  // The expectation is that ToDuplicate[0] is the condition used by the +  // OriginalBranch, in which case we can clone the profile metadata from there. +  auto *ProfData = +      !ProfcheckDisableMetadataFixes && +              ToDuplicate[0] == skipTrivialSelect(OriginalBranch.getCondition()) +          ? OriginalBranch.getMetadata(LLVMContext::MD_prof) +          : nullptr; +  auto *BR = +      IRB.CreateCondBr(Cond, Direction ? &UnswitchedSucc : &NormalSucc, +                       Direction ? &NormalSucc : &UnswitchedSucc, ProfData); +  if (!ProfData) +    setExplicitlyUnknownBranchWeightsIfProfiled(*BR, *BR->getFunction(), +                                                DEBUG_TYPE);  }  /// Rewrite the PHI nodes in an unswitched loop exit basic block. @@ -2515,7 +2526,7 @@ static void unswitchNontrivialInvariants(      // the branch in the split block.      
if (PartiallyInvariant)        buildPartialInvariantUnswitchConditionalBranch( -          *SplitBB, Invariants, Direction, *ClonedPH, *LoopPH, L, MSSAU); +          *SplitBB, Invariants, Direction, *ClonedPH, *LoopPH, L, MSSAU, *BI);      else {        buildPartialUnswitchConditionalBranch(            *SplitBB, Invariants, Direction, *ClonedPH, *LoopPH, diff --git a/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp b/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp index 5f6f66a..0a8f5ea 100644 --- a/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp +++ b/llvm/lib/Transforms/Scalar/StructurizeCFG.cpp @@ -558,11 +558,10 @@ void StructurizeCFG::analyzeLoops(RegionNode *N) {    } else {      // Test for successors as back edge      BasicBlock *BB = N->getNodeAs<BasicBlock>(); -    BranchInst *Term = cast<BranchInst>(BB->getTerminator()); - -    for (BasicBlock *Succ : Term->successors()) -      if (Visited.count(Succ)) -        Loops[Succ] = BB; +    if (BranchInst *Term = dyn_cast<BranchInst>(BB->getTerminator())) +      for (BasicBlock *Succ : Term->successors()) +        if (Visited.count(Succ)) +          Loops[Succ] = BB;    }  } @@ -594,7 +593,7 @@ void StructurizeCFG::gatherPredicates(RegionNode *N) {    for (BasicBlock *P : predecessors(BB)) {      // Ignore it if it's a branch from outside into our region entry -    if (!ParentRegion->contains(P)) +    if (!ParentRegion->contains(P) || !dyn_cast<BranchInst>(P->getTerminator()))        continue;      Region *R = RI->getRegionFor(P); @@ -1402,13 +1401,17 @@ bool StructurizeCFG::makeUniformRegion(Region *R, UniformityInfo &UA) {  /// Run the transformation for each region found  bool StructurizeCFG::run(Region *R, DominatorTree *DT,                           const TargetTransformInfo *TTI) { -  if (R->isTopLevelRegion()) +  // CallBr and its corresponding direct target blocks are for now ignored by +  // this pass. This is not a limitation for the currently intended use cases +  // of callbr in the AMDGPU backend. +  // Parent and child regions are not affected by this (current) restriction. +  // See `llvm/test/Transforms/StructurizeCFG/callbr.ll` for details. +  if (R->isTopLevelRegion() || isa<CallBrInst>(R->getEntry()->getTerminator()))      return false;    this->DT = DT;    this->TTI = TTI;    Func = R->getEntry()->getParent(); -  assert(hasOnlySimpleTerminator(*Func) && "Unsupported block terminator.");    ParentRegion = R; diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp index 46f2903..a03cf6e 100644 --- a/llvm/lib/Transforms/Utils/Local.cpp +++ b/llvm/lib/Transforms/Utils/Local.cpp @@ -3416,7 +3416,11 @@ DIExpression *llvm::getExpressionForConstant(DIBuilder &DIB, const Constant &C,    // Create integer constant expression.    auto createIntegerExpression = [&DIB](const Constant &CV) -> DIExpression * {      const APInt &API = cast<ConstantInt>(&CV)->getValue(); -    std::optional<int64_t> InitIntOpt = API.trySExtValue(); +    std::optional<int64_t> InitIntOpt; +    if (API.getBitWidth() == 1) +      InitIntOpt = API.tryZExtValue(); +    else +      InitIntOpt = API.trySExtValue();      return InitIntOpt ? 
DIB.createConstantValueExpression(                              static_cast<uint64_t>(*InitIntOpt))                        : nullptr; diff --git a/llvm/lib/Transforms/Utils/UnifyLoopExits.cpp b/llvm/lib/Transforms/Utils/UnifyLoopExits.cpp index 94c5c170..e86ab13 100644 --- a/llvm/lib/Transforms/Utils/UnifyLoopExits.cpp +++ b/llvm/lib/Transforms/Utils/UnifyLoopExits.cpp @@ -158,6 +158,7 @@ static bool unifyLoopExits(DominatorTree &DT, LoopInfo &LI, Loop *L) {    SmallVector<BasicBlock *, 8> CallBrTargetBlocksToFix;    // Redirect exiting edges through a control flow hub.    ControlFlowHub CHub; +  bool Changed = false;    for (unsigned I = 0; I < ExitingBlocks.size(); ++I) {      BasicBlock *BB = ExitingBlocks[I]; @@ -182,6 +183,10 @@ static bool unifyLoopExits(DominatorTree &DT, LoopInfo &LI, Loop *L) {          bool UpdatedLI = false;          BasicBlock *NewSucc =              SplitCallBrEdge(BB, Succ, J, &DTU, nullptr, &LI, &UpdatedLI); +        // SplitCallBrEdge modifies the CFG because it creates an intermediate +        // block. So we need to set the changed flag no matter what the +        // ControlFlowHub is going to do later. +        Changed = true;          // Even if CallBr and Succ do not have a common parent loop, we need to          // add the new target block to the parent loop of the current loop.          if (!UpdatedLI) @@ -207,6 +212,7 @@ static bool unifyLoopExits(DominatorTree &DT, LoopInfo &LI, Loop *L) {    bool ChangedCFG;    std::tie(LoopExitBlock, ChangedCFG) = CHub.finalize(        &DTU, GuardBlocks, "loop.exit", MaxBooleansInControlFlowHub.getValue()); +  ChangedCFG |= Changed;    if (!ChangedCFG)      return false; diff --git a/llvm/test/CodeGen/AArch64/cfguard-arm64ec.ll b/llvm/test/CodeGen/AArch64/cfguard-arm64ec.ll index bdbc99e..75e7ac90 100644 --- a/llvm/test/CodeGen/AArch64/cfguard-arm64ec.ll +++ b/llvm/test/CodeGen/AArch64/cfguard-arm64ec.ll @@ -2,15 +2,58 @@  declare void @called()  declare void @escaped() -define void @f(ptr %dst) { +define void @f(ptr %dst, ptr readonly %f) {    call void @called() +; CHECK:         bl      "#called"    store ptr @escaped, ptr %dst -  ret void +  call void %f() +; CHECK:       adrp    x10, $iexit_thunk$cdecl$v$v +; CHECK-NEXT:  add     x10, x10, :lo12:$iexit_thunk$cdecl$v$v +; CHECK-NEXT:  str     x8, [x20] +; CHECK-NEXT:  adrp    x8, __os_arm64x_check_icall_cfg +; CHECK-NEXT:  ldr     x8, [x8, :lo12:__os_arm64x_check_icall_cfg] +; CHECK-NEXT:  mov     x11, +; CHECK-NEXT:  blr     x8 +; CHECK-NEXT:  blr     x11 +    ret void  } +; CHECK-LABEL:    .def "#called$exit_thunk"; +; CHECK-NEXT:     .scl 2; +; CHECK-NEXT:     .type 32; +; CHECK-NEXT:     .endef +; CHECK-NEXT:     .section .wowthk$aa,"xr",discard,"#called$exit_thunk" +; CHECK-NEXT:     .globl "#called$exit_thunk"            // -- Begin function #called$exit_thunk +; CHECK-NEXT:     .p2align 2 +; CHECK-NEXT: "#called$exit_thunk":                   // @"#called$exit_thunk" +; CHECK-NEXT:     .weak_anti_dep called +; CHECK-NEXT: called = "#called" +; CHECK-NEXT:     .weak_anti_dep "#called" +; CHECK-NEXT: "#called" = "#called$exit_thunk" +; CHECK-NEXT:    .seh_proc "#called$exit_thunk" +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT:     str x30, [sp, #-16]!                
// 8-byte Folded Spill +; CHECK-NEXT:     .seh_save_reg_x x30, 16 +; CHECK-NEXT:     .seh_endprologue +; CHECK-NEXT:     adrp x8, __os_arm64x_check_icall +; CHECK-NEXT:     adrp x11, called +; CHECK-NEXT:     add x11, x11, :lo12:called +; CHECK-NEXT:     ldr x8, [x8, :lo12:__os_arm64x_check_icall] +; CHECK-NEXT:     adrp x10, $iexit_thunk$cdecl$v$v +; CHECK-NEXT:     add x10, x10, :lo12:$iexit_thunk$cdecl$v$v +; CHECK-NEXT:     blr x8 +; CHECK-NEXT:     .seh_startepilogue +; CHECK-NEXT:     ldr x30, [sp], #16                  // 8-byte Folded Reload +; CHECK-NEXT:     .seh_save_reg_x x30, 16 +; CHECK-NEXT:     .seh_endepilogue +; CHECK-NEXT:     br x11 +; CHECK-NEXT:     .seh_endfunclet +; CHECK-NEXT:     .seh_endproc +  !llvm.module.flags = !{!0} -!0 = !{i32 2, !"cfguard", i32 1} +!0 = !{i32 2, !"cfguard", i32 2}  ; CHECK-LABEL: .section .gfids$y,"dr"  ; CHECK-NEXT:  .symidx escaped +; CHECK-NEXT:  .symidx $iexit_thunk$cdecl$v$v  ; CHECK-NOT:   .symidx diff --git a/llvm/test/CodeGen/AMDGPU/callbr.ll b/llvm/test/CodeGen/AMDGPU/callbr.ll new file mode 100644 index 0000000..253a6ec --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/callbr.ll @@ -0,0 +1,54 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=amdgcn -mcpu=gfx90a < %s | FileCheck %s + +define void @callbr_inline_asm(ptr %src, ptr %dst1, ptr %dst2, i32 %c) { +; CHECK-LABEL: callbr_inline_asm: +; CHECK:       ; %bb.0: +; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT:    flat_load_dword v0, v[0:1] +; CHECK-NEXT:    ;;#ASMSTART +; CHECK-NEXT:    v_cmp_gt_i32 vcc v6, 42; s_cbranch_vccnz .LBB0_2 +; CHECK-NEXT:    ;;#ASMEND +; CHECK-NEXT:  ; %bb.1: ; %fallthrough +; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0) +; CHECK-NEXT:    flat_store_dword v[2:3], v0 +; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0) +; CHECK-NEXT:    s_setpc_b64 s[30:31] +; CHECK-NEXT:  .LBB0_2: ; Inline asm indirect target +; CHECK-NEXT:    ; %indirect +; CHECK-NEXT:    ; Label of block must be emitted +; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0) +; CHECK-NEXT:    flat_store_dword v[4:5], v0 +; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0) +; CHECK-NEXT:    s_setpc_b64 s[30:31] +	%a = load i32, ptr %src, align 4 +	callbr void asm "v_cmp_gt_i32 vcc $0, 42; s_cbranch_vccnz ${1:l}", "r,!i"(i32 %c) to label %fallthrough [label %indirect] +fallthrough: +	store i32 %a, ptr %dst1, align 4 +	br label %ret +indirect: +	store i32 %a, ptr %dst2, align 4 +	br label %ret +ret: +	ret void +} + +define void @callbr_self_loop(i1 %c) { +; CHECK-LABEL: callbr_self_loop: +; CHECK:       ; %bb.0: +; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT:  .LBB1_1: ; %callbr +; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1 +; CHECK-NEXT:    ;;#ASMSTART +; CHECK-NEXT:    ;;#ASMEND +; CHECK-NEXT:    s_branch .LBB1_1 +; CHECK-NEXT:  .LBB1_2: ; Inline asm indirect target +; CHECK-NEXT:    ; %callbr.target.ret +; CHECK-NEXT:    ; Label of block must be emitted +; CHECK-NEXT:    s_setpc_b64 s[30:31] +  br label %callbr +callbr: +  callbr void asm "", "!i"() to label %callbr [label %ret] +ret: +  ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/do-not-unify-divergent-exit-nodes-with-musttail.ll b/llvm/test/CodeGen/AMDGPU/do-not-unify-divergent-exit-nodes-with-musttail.ll index 007e3f0..076a99f 100644 --- a/llvm/test/CodeGen/AMDGPU/do-not-unify-divergent-exit-nodes-with-musttail.ll +++ b/llvm/test/CodeGen/AMDGPU/do-not-unify-divergent-exit-nodes-with-musttail.ll @@ -3,6 +3,7 @@  
declare void @foo(ptr)  declare i1 @bar(ptr) +declare i32 @bar32(ptr)  define void @musttail_call_without_return_value(ptr %p) {  ; CHECK-LABEL: define void @musttail_call_without_return_value( @@ -28,6 +29,31 @@ bb.1:    ret void  } +define void @musttail_call_without_return_value_callbr(ptr %p) { +; CHECK-LABEL: define void @musttail_call_without_return_value_callbr( +; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] { +; CHECK-NEXT:  [[ENTRY:.*:]] +; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[P]], align 1 +; CHECK-NEXT:    callbr void asm "", "r,!i"(i32 [[LOAD]]) +; CHECK-NEXT:            to label %[[BB_0:.*]] [label %bb.1] +; CHECK:       [[BB_0]]: +; CHECK-NEXT:    musttail call void @foo(ptr [[P]]) +; CHECK-NEXT:    ret void +; CHECK:       [[BB_1:.*:]] +; CHECK-NEXT:    ret void +; +entry: +  %load = load i32, ptr %p, align 1 +  callbr void asm "", "r,!i"(i32 %load) to label %bb.0 [label %bb.1] + +bb.0: +  musttail call void @foo(ptr %p) +  ret void + +bb.1: +  ret void +} +  define i1 @musttail_call_with_return_value(ptr %p) {  ; CHECK-LABEL: define i1 @musttail_call_with_return_value(  ; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] { @@ -51,3 +77,28 @@ bb.0:  bb.1:    ret i1 %load  } + +define i32 @musttail_call_with_return_value_callbr(ptr %p) { +; CHECK-LABEL: define i32 @musttail_call_with_return_value_callbr( +; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] { +; CHECK-NEXT:  [[ENTRY:.*:]] +; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[P]], align 1 +; CHECK-NEXT:    callbr void asm "", "r,!i"(i32 [[LOAD]]) +; CHECK-NEXT:            to label %[[BB_0:.*]] [label %bb.1] +; CHECK:       [[BB_0]]: +; CHECK-NEXT:    [[RET:%.*]] = musttail call i32 @bar32(ptr [[P]]) +; CHECK-NEXT:    ret i32 [[RET]] +; CHECK:       [[BB_1:.*:]] +; CHECK-NEXT:    ret i32 [[LOAD]] +; +entry: +  %load = load i32, ptr %p, align 1 +  callbr void asm "", "r,!i"(i32 %load) to label %bb.0 [label %bb.1] + +bb.0: +  %ret = musttail call i32 @bar32(ptr %p) +  ret i32 %ret + +bb.1: +  ret i32 %load +} diff --git a/llvm/test/CodeGen/AMDGPU/infinite-loop.ll b/llvm/test/CodeGen/AMDGPU/infinite-loop.ll index 3e2e43f..df63592 100644 --- a/llvm/test/CodeGen/AMDGPU/infinite-loop.ll +++ b/llvm/test/CodeGen/AMDGPU/infinite-loop.ll @@ -36,26 +36,60 @@ loop:    br label %loop  } +define amdgpu_kernel void @infinite_loop_callbr(ptr addrspace(1) %out) { +; SI-LABEL: infinite_loop_callbr: +; SI:       ; %bb.0: ; %entry +; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-NEXT:    ;;#ASMSTART +; SI-NEXT:    ;;#ASMEND +; SI-NEXT:    s_mov_b32 s3, 0xf000 +; SI-NEXT:    s_mov_b32 s2, -1 +; SI-NEXT:    v_mov_b32_e32 v0, 0x3e7 +; SI-NEXT:    s_waitcnt lgkmcnt(0) +; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0 +; SI-NEXT:    s_waitcnt vmcnt(0) +; SI-NEXT:    s_endpgm +; IR-LABEL: @infinite_loop_callbr( +; IR-NEXT:  entry: +; IR-NEXT:    callbr void asm "", ""() +; IR-NEXT:            to label [[LOOP:%.*]] [] +; IR:       loop: +; IR-NEXT:    store volatile i32 999, ptr addrspace(1) [[OUT:%.*]], align 4 +; IR-NEXT:    br i1 true, label [[TRANSITIONBLOCK:%.*]], label [[DUMMYRETURNBLOCK:%.*]] +; IR:       TransitionBlock: +; IR-NEXT:    callbr void asm "", ""() +; IR-NEXT:            to label [[LOOP]] [] +; IR:       DummyReturnBlock: +; IR-NEXT:    ret void +; +entry: +  callbr void asm "", ""() to label %loop [] + +loop: +  store volatile i32 999, ptr addrspace(1) %out, align 4 +  callbr void asm "", ""() to label %loop [] +} +  define amdgpu_kernel void @infinite_loop_ret(ptr addrspace(1) %out) {  ; SI-LABEL: infinite_loop_ret:  ; SI:       ; 
%bb.0: ; %entry  ; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0  ; SI-NEXT:    s_and_saveexec_b64 s[0:1], vcc -; SI-NEXT:    s_cbranch_execz .LBB1_3 +; SI-NEXT:    s_cbranch_execz .LBB2_3  ; SI-NEXT:  ; %bb.1: ; %loop.preheader  ; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9  ; SI-NEXT:    s_mov_b32 s3, 0xf000  ; SI-NEXT:    s_mov_b32 s2, -1  ; SI-NEXT:    v_mov_b32_e32 v0, 0x3e7  ; SI-NEXT:    s_and_b64 vcc, exec, -1 -; SI-NEXT:  .LBB1_2: ; %loop +; SI-NEXT:  .LBB2_2: ; %loop  ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1  ; SI-NEXT:    s_waitcnt lgkmcnt(0)  ; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0  ; SI-NEXT:    s_waitcnt vmcnt(0)  ; SI-NEXT:    s_mov_b64 vcc, vcc -; SI-NEXT:    s_cbranch_vccnz .LBB1_2 -; SI-NEXT:  .LBB1_3: ; %UnifiedReturnBlock +; SI-NEXT:    s_cbranch_vccnz .LBB2_2 +; SI-NEXT:  .LBB2_3: ; %UnifiedReturnBlock  ; SI-NEXT:    s_endpgm  ; IR-LABEL: @infinite_loop_ret(  ; IR-NEXT:  entry: @@ -81,44 +115,93 @@ return:    ret void  } +define amdgpu_kernel void @infinite_loop_ret_callbr(ptr addrspace(1) %out) { +; SI-LABEL: infinite_loop_ret_callbr: +; SI:       ; %bb.0: ; %entry +; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0 +; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc +; SI-NEXT:    ;;#ASMSTART +; SI-NEXT:    ;;#ASMEND +; SI-NEXT:  ; %bb.1: ; %loop.preheader +; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-NEXT:    s_mov_b32 s3, 0xf000 +; SI-NEXT:    s_mov_b32 s2, -1 +; SI-NEXT:    v_mov_b32_e32 v0, 0x3e7 +; SI-NEXT:    s_waitcnt lgkmcnt(0) +; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0 +; SI-NEXT:    s_waitcnt vmcnt(0) +; SI-NEXT:  .LBB3_2: ; Inline asm indirect target +; SI-NEXT:    ; %UnifiedReturnBlock +; SI-NEXT:    ; Label of block must be emitted +; SI-NEXT:    s_endpgm +; IR-LABEL: @infinite_loop_ret_callbr( +; IR-NEXT:  entry: +; IR-NEXT:    [[TMP:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x() +; IR-NEXT:    [[COND:%.*]] = icmp eq i32 [[TMP]], 1 +; IR-NEXT:    [[COND32:%.*]] = zext i1 [[COND]] to i32 +; IR-NEXT:    callbr void asm "", "r,!i"(i32 [[COND32]]) +; IR-NEXT:            to label [[LOOP:%.*]] [label %UnifiedReturnBlock] +; IR:       loop: +; IR-NEXT:    store volatile i32 999, ptr addrspace(1) [[OUT:%.*]], align 4 +; IR-NEXT:    br i1 true, label [[TRANSITIONBLOCK:%.*]], label [[UNIFIEDRETURNBLOCK:%.*]] +; IR:       TransitionBlock: +; IR-NEXT:    callbr void asm "", ""() +; IR-NEXT:            to label [[LOOP]] [] +; IR:       UnifiedReturnBlock: +; IR-NEXT:    ret void +; +entry: +  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() +  %cond = icmp eq i32 %tmp, 1 +  %cond32 = zext i1 %cond to i32 +  callbr void asm "", "r,!i"(i32 %cond32) to label %loop [label %return] + +loop: +  store volatile i32 999, ptr addrspace(1) %out, align 4 +  callbr void asm "", ""() to label %loop [] + +return: +  ret void +} +  define amdgpu_kernel void @infinite_loops(ptr addrspace(1) %out) {  ; SI-LABEL: infinite_loops:  ; SI:       ; %bb.0: ; %entry  ; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9  ; SI-NEXT:    s_mov_b64 s[2:3], -1 -; SI-NEXT:    s_cbranch_scc1 .LBB2_4 +; SI-NEXT:    s_cbranch_scc1 .LBB4_4  ; SI-NEXT:  ; %bb.1:  ; SI-NEXT:    s_mov_b32 s3, 0xf000  ; SI-NEXT:    s_mov_b32 s2, -1  ; SI-NEXT:    v_mov_b32_e32 v0, 0x378  ; SI-NEXT:    s_and_b64 vcc, exec, -1 -; SI-NEXT:  .LBB2_2: ; %loop2 +; SI-NEXT:  .LBB4_2: ; %loop2  ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1  ; SI-NEXT:    s_waitcnt lgkmcnt(0)  ; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0  ; SI-NEXT:    s_waitcnt vmcnt(0)  ; SI-NEXT:    s_mov_b64 vcc, vcc -; 
SI-NEXT:    s_cbranch_vccnz .LBB2_2 +; SI-NEXT:    s_cbranch_vccnz .LBB4_2  ; SI-NEXT:  ; %bb.3: ; %Flow  ; SI-NEXT:    s_mov_b64 s[2:3], 0 -; SI-NEXT:  .LBB2_4: ; %Flow2 +; SI-NEXT:  .LBB4_4: ; %Flow2  ; SI-NEXT:    s_and_b64 vcc, exec, s[2:3]  ; SI-NEXT:    s_waitcnt lgkmcnt(0)  ; SI-NEXT:    s_mov_b64 vcc, vcc -; SI-NEXT:    s_cbranch_vccz .LBB2_7 +; SI-NEXT:    s_cbranch_vccz .LBB4_7  ; SI-NEXT:  ; %bb.5:  ; SI-NEXT:    s_mov_b32 s3, 0xf000  ; SI-NEXT:    s_mov_b32 s2, -1  ; SI-NEXT:    s_waitcnt expcnt(0)  ; SI-NEXT:    v_mov_b32_e32 v0, 0x3e7  ; SI-NEXT:    s_and_b64 vcc, exec, 0 -; SI-NEXT:  .LBB2_6: ; %loop1 +; SI-NEXT:  .LBB4_6: ; %loop1  ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1  ; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0  ; SI-NEXT:    s_waitcnt vmcnt(0)  ; SI-NEXT:    s_mov_b64 vcc, vcc -; SI-NEXT:    s_cbranch_vccz .LBB2_6 -; SI-NEXT:  .LBB2_7: ; %DummyReturnBlock +; SI-NEXT:    s_cbranch_vccz .LBB4_6 +; SI-NEXT:  .LBB4_7: ; %DummyReturnBlock  ; SI-NEXT:    s_endpgm  ; IR-LABEL: @infinite_loops(  ; IR-NEXT:  entry: @@ -144,24 +227,78 @@ loop2:    br label %loop2  } +define amdgpu_kernel void @infinite_loops_callbr(ptr addrspace(1) %out) { +; SI-LABEL: infinite_loops_callbr: +; SI:       ; %bb.0: ; %entry +; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-NEXT:    s_waitcnt lgkmcnt(0) +; SI-NEXT:    ;;#ASMSTART +; SI-NEXT:    ;;#ASMEND +; SI-NEXT:  ; %bb.1: ; %loop1 +; SI-NEXT:    s_mov_b32 s3, 0xf000 +; SI-NEXT:    s_mov_b32 s2, -1 +; SI-NEXT:    v_mov_b32_e32 v0, 0x3e7 +; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0 +; SI-NEXT:    s_waitcnt vmcnt(0) +; SI-NEXT:    s_endpgm +; SI-NEXT:  .LBB5_2: ; Inline asm indirect target +; SI-NEXT:    ; %loop2.preheader +; SI-NEXT:    ; Label of block must be emitted +; SI-NEXT:    s_mov_b32 s3, 0xf000 +; SI-NEXT:    s_mov_b32 s2, -1 +; SI-NEXT:    v_mov_b32_e32 v0, 0x378 +; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0 +; SI-NEXT:    s_waitcnt vmcnt(0) +; SI-NEXT:    s_endpgm +; IR-LABEL: @infinite_loops_callbr( +; IR-NEXT:  entry: +; IR-NEXT:    callbr void asm "", "r,!i"(i32 poison) +; IR-NEXT:            to label [[LOOP1:%.*]] [label %loop2] +; IR:       loop1: +; IR-NEXT:    store volatile i32 999, ptr addrspace(1) [[OUT:%.*]], align 4 +; IR-NEXT:    br i1 true, label [[TRANSITIONBLOCK:%.*]], label [[DUMMYRETURNBLOCK:%.*]] +; IR:       TransitionBlock: +; IR-NEXT:    callbr void asm "", ""() +; IR-NEXT:            to label [[LOOP1]] [] +; IR:       loop2: +; IR-NEXT:    store volatile i32 888, ptr addrspace(1) [[OUT]], align 4 +; IR-NEXT:    br i1 true, label [[TRANSITIONBLOCK1:%.*]], label [[DUMMYRETURNBLOCK]] +; IR:       TransitionBlock1: +; IR-NEXT:    callbr void asm "", ""() +; IR-NEXT:            to label [[LOOP2:%.*]] [] +; IR:       DummyReturnBlock: +; IR-NEXT:    ret void +; +entry: +  callbr void asm "", "r,!i"(i32 poison) to label %loop1 [label %loop2] + +loop1: +  store volatile i32 999, ptr addrspace(1) %out, align 4 +  callbr void asm "", ""() to label %loop1 [] + +loop2: +  store volatile i32 888, ptr addrspace(1) %out, align 4 +  callbr void asm "", ""() to label %loop2 [] +} +  define amdgpu_kernel void @infinite_loop_nest_ret(ptr addrspace(1) %out) {  ; SI-LABEL: infinite_loop_nest_ret:  ; SI:       ; %bb.0: ; %entry  ; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 1, v0  ; SI-NEXT:    s_and_saveexec_b64 s[0:1], vcc -; SI-NEXT:    s_cbranch_execz .LBB3_5 +; SI-NEXT:    s_cbranch_execz .LBB6_5  ; SI-NEXT:  ; %bb.1: ; %outer_loop.preheader  ; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x9  ; 
SI-NEXT:    v_cmp_ne_u32_e64 s[0:1], 3, v0  ; SI-NEXT:    s_mov_b32 s7, 0xf000  ; SI-NEXT:    s_mov_b32 s6, -1  ; SI-NEXT:    v_mov_b32_e32 v0, 0x3e7 -; SI-NEXT:  .LBB3_2: ; %outer_loop +; SI-NEXT:  .LBB6_2: ; %outer_loop  ; SI-NEXT:    ; =>This Loop Header: Depth=1 -; SI-NEXT:    ; Child Loop BB3_3 Depth 2 +; SI-NEXT:    ; Child Loop BB6_3 Depth 2  ; SI-NEXT:    s_mov_b64 s[2:3], 0 -; SI-NEXT:  .LBB3_3: ; %inner_loop -; SI-NEXT:    ; Parent Loop BB3_2 Depth=1 +; SI-NEXT:  .LBB6_3: ; %inner_loop +; SI-NEXT:    ; Parent Loop BB6_2 Depth=1  ; SI-NEXT:    ; => This Inner Loop Header: Depth=2  ; SI-NEXT:    s_and_b64 s[8:9], exec, s[0:1]  ; SI-NEXT:    s_or_b64 s[2:3], s[8:9], s[2:3] @@ -169,13 +306,13 @@ define amdgpu_kernel void @infinite_loop_nest_ret(ptr addrspace(1) %out) {  ; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0  ; SI-NEXT:    s_waitcnt vmcnt(0)  ; SI-NEXT:    s_andn2_b64 exec, exec, s[2:3] -; SI-NEXT:    s_cbranch_execnz .LBB3_3 +; SI-NEXT:    s_cbranch_execnz .LBB6_3  ; SI-NEXT:  ; %bb.4: ; %loop.exit.guard -; SI-NEXT:    ; in Loop: Header=BB3_2 Depth=1 +; SI-NEXT:    ; in Loop: Header=BB6_2 Depth=1  ; SI-NEXT:    s_or_b64 exec, exec, s[2:3]  ; SI-NEXT:    s_mov_b64 vcc, 0 -; SI-NEXT:    s_branch .LBB3_2 -; SI-NEXT:  .LBB3_5: ; %UnifiedReturnBlock +; SI-NEXT:    s_branch .LBB6_2 +; SI-NEXT:  .LBB6_5: ; %UnifiedReturnBlock  ; SI-NEXT:    s_endpgm  ; IR-LABEL: @infinite_loop_nest_ret(  ; IR-NEXT:  entry: @@ -212,4 +349,82 @@ return:    ret void  } +define amdgpu_kernel void @infinite_loop_nest_ret_callbr(ptr addrspace(1) %out) { +; SI-LABEL: infinite_loop_nest_ret_callbr: +; SI:       ; %bb.0: ; %entry +; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 1, v0 +; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc +; SI-NEXT:    ;;#ASMSTART +; SI-NEXT:    ;;#ASMEND +; SI-NEXT:  ; %bb.1: ; %outer_loop.preheader +; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x9 +; SI-NEXT:    s_mov_b32 s7, 0xf000 +; SI-NEXT:    s_mov_b32 s6, -1 +; SI-NEXT:    v_mov_b32_e32 v0, 0x3e7 +; SI-NEXT:    s_and_b64 s[0:1], exec, 0 +; SI-NEXT:    s_branch .LBB7_3 +; SI-NEXT:  .LBB7_2: ; %loop.exit.guard +; SI-NEXT:    ; in Loop: Header=BB7_3 Depth=1 +; SI-NEXT:    s_and_b64 vcc, exec, s[2:3] +; SI-NEXT:    s_cbranch_vccnz .LBB7_5 +; SI-NEXT:  .LBB7_3: ; %outer_loop +; SI-NEXT:    ; =>This Inner Loop Header: Depth=1 +; SI-NEXT:    ;;#ASMSTART +; SI-NEXT:    ;;#ASMEND +; SI-NEXT:    s_waitcnt lgkmcnt(0) +; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT:    s_waitcnt vmcnt(0) +; SI-NEXT:    s_mov_b64 s[2:3], -1 +; SI-NEXT:    s_mov_b64 vcc, s[0:1] +; SI-NEXT:    s_cbranch_vccz .LBB7_2 +; SI-NEXT:  ; %bb.4: ; %TransitionBlock.target.outer_loop +; SI-NEXT:    ; in Loop: Header=BB7_3 Depth=1 +; SI-NEXT:    s_mov_b64 s[2:3], 0 +; SI-NEXT:    s_branch .LBB7_2 +; SI-NEXT:  .LBB7_5: ; Inline asm indirect target +; SI-NEXT:    ; %UnifiedReturnBlock +; SI-NEXT:    ; Label of block must be emitted +; SI-NEXT:    s_endpgm +; IR-LABEL: @infinite_loop_nest_ret_callbr( +; IR-NEXT:  entry: +; IR-NEXT:    [[TMP:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x() +; IR-NEXT:    [[COND1:%.*]] = icmp ne i32 [[TMP]], 1 +; IR-NEXT:    [[COND1_32:%.*]] = zext i1 [[COND1]] to i32 +; IR-NEXT:    callbr void asm "", "r,!i"(i32 [[COND1_32]]) +; IR-NEXT:            to label [[OUTER_LOOP:%.*]] [label %UnifiedReturnBlock] +; IR:       outer_loop: +; IR-NEXT:    callbr void asm "", ""() +; IR-NEXT:            to label [[INNER_LOOP:%.*]] [] +; IR:       inner_loop: +; IR-NEXT:    store volatile i32 999, ptr addrspace(1) [[OUT:%.*]], align 4 +; 
IR-NEXT:    [[COND3:%.*]] = icmp eq i32 [[TMP]], 3 +; IR-NEXT:    [[COND3_32:%.*]] = zext i1 [[COND3]] to i32 +; IR-NEXT:    br i1 true, label [[TRANSITIONBLOCK:%.*]], label [[UNIFIEDRETURNBLOCK:%.*]] +; IR:       TransitionBlock: +; IR-NEXT:    callbr void asm "", "r,!i"(i32 [[COND3_32]]) +; IR-NEXT:            to label [[INNER_LOOP]] [label %outer_loop] +; IR:       UnifiedReturnBlock: +; IR-NEXT:    ret void +; +entry: +  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() +  %cond1 = icmp ne i32 %tmp, 1  ; avoid following BB optimizing away through the domination +  %cond1_32 = zext i1 %cond1 to i32 +  callbr void asm "", "r,!i"(i32 %cond1_32) to label %outer_loop [label %return] + +outer_loop: +  ; %cond2 = icmp eq i32 %tmp, 2 +  ; br i1 %cond2, label %outer_loop, label %inner_loop +  callbr void asm "", ""() to label %inner_loop [] + +inner_loop:                                     ; preds = %LeafBlock, %LeafBlock1 +  store volatile i32 999, ptr addrspace(1) %out, align 4 +  %cond3 = icmp eq i32 %tmp, 3 +  %cond3_32 = zext i1 %cond3 to i32 +  callbr void asm "", "r,!i"(i32 %cond3_32) to label %inner_loop [label %outer_loop] + +return: +  ret void +} +  declare i32 @llvm.amdgcn.workitem.id.x() diff --git a/llvm/test/CodeGen/AMDGPU/si-annotate-nested-control-flows.ll b/llvm/test/CodeGen/AMDGPU/si-annotate-nested-control-flows.ll index 34de1e4..01bcdad 100644 --- a/llvm/test/CodeGen/AMDGPU/si-annotate-nested-control-flows.ll +++ b/llvm/test/CodeGen/AMDGPU/si-annotate-nested-control-flows.ll @@ -3,15 +3,16 @@  ; RUN: llc -mtriple=amdgcn-amd-amdhsa %s -o - | FileCheck %s --check-prefix=ISA  define void @nested_inf_loop(i1 %0, i1 %1) { -; OPT-LABEL: @nested_inf_loop( -; OPT-NEXT:  BB: -; OPT-NEXT:    br label [[BB1:%.*]] -; OPT:       BB1: -; OPT-NEXT:    [[BRMERGE:%.*]] = select i1 [[TMP0:%.*]], i1 true, i1 [[TMP1:%.*]] -; OPT-NEXT:    br i1 [[BRMERGE]], label [[BB1]], label [[INFLOOP:%.*]] -; OPT:       infloop: -; OPT-NEXT:    br i1 true, label [[INFLOOP]], label [[DUMMYRETURNBLOCK:%.*]] -; OPT:       DummyReturnBlock: +; OPT-LABEL: define void @nested_inf_loop( +; OPT-SAME: i1 [[TMP0:%.*]], i1 [[TMP1:%.*]]) { +; OPT-NEXT:  [[BB:.*:]] +; OPT-NEXT:    br label %[[BB1:.*]] +; OPT:       [[BB1]]: +; OPT-NEXT:    [[BRMERGE:%.*]] = select i1 [[TMP0]], i1 true, i1 [[TMP1]] +; OPT-NEXT:    br i1 [[BRMERGE]], label %[[BB1]], label %[[INFLOOP:.*]] +; OPT:       [[INFLOOP]]: +; OPT-NEXT:    br i1 true, label %[[INFLOOP]], label %[[DUMMYRETURNBLOCK:.*]] +; OPT:       [[DUMMYRETURNBLOCK]]:  ; OPT-NEXT:    ret void  ;  ; ISA-LABEL: nested_inf_loop: @@ -63,3 +64,84 @@ BB4:  BB3:    br label %BB1  } + +define void @nested_inf_loop_callbr(i32 %0, i32 %1) { +; OPT-LABEL: define void @nested_inf_loop_callbr( +; OPT-SAME: i32 [[TMP0:%.*]], i32 [[TMP1:%.*]]) { +; OPT-NEXT:  [[BB:.*:]] +; OPT-NEXT:    callbr void asm "", ""() +; OPT-NEXT:            to label %[[BB1:.*]] [] +; OPT:       [[BB1]]: +; OPT-NEXT:    callbr void asm "", "r,!i"(i32 [[TMP0]]) +; OPT-NEXT:            to label %[[BB3:.*]] [label %BB2] +; OPT:       [[BB2:.*:]] +; OPT-NEXT:    callbr void asm "", ""() +; OPT-NEXT:            to label %[[BB4:.*]] [] +; OPT:       [[BB4]]: +; OPT-NEXT:    br i1 true, label %[[TRANSITIONBLOCK:.*]], label %[[DUMMYRETURNBLOCK:.*]] +; OPT:       [[TRANSITIONBLOCK]]: +; OPT-NEXT:    callbr void asm "", "r,!i"(i32 [[TMP1]]) +; OPT-NEXT:            to label %[[BB3]] [label %BB4] +; OPT:       [[BB3]]: +; OPT-NEXT:    callbr void asm "", ""() +; OPT-NEXT:            to label %[[BB1]] [] +; OPT:       
[[DUMMYRETURNBLOCK]]: +; OPT-NEXT:    ret void +; +; ISA-LABEL: nested_inf_loop_callbr: +; ISA:       ; %bb.0: ; %BB +; ISA-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; ISA-NEXT:    ;;#ASMSTART +; ISA-NEXT:    ;;#ASMEND +; ISA-NEXT:    ; implicit-def: $sgpr6_sgpr7 +; ISA-NEXT:    ; implicit-def: $sgpr4_sgpr5 +; ISA-NEXT:  .LBB1_1: ; %BB1 +; ISA-NEXT:    ; =>This Inner Loop Header: Depth=1 +; ISA-NEXT:    ;;#ASMSTART +; ISA-NEXT:    ;;#ASMEND +; ISA-NEXT:    s_andn2_b64 s[6:7], s[6:7], exec +; ISA-NEXT:    s_and_b64 s[8:9], s[4:5], exec +; ISA-NEXT:    s_or_b64 s[6:7], s[6:7], s[8:9] +; ISA-NEXT:  .LBB1_2: ; %BB3 +; ISA-NEXT:    ; in Loop: Header=BB1_1 Depth=1 +; ISA-NEXT:    ;;#ASMSTART +; ISA-NEXT:    ;;#ASMEND +; ISA-NEXT:    s_andn2_b64 s[4:5], s[4:5], exec +; ISA-NEXT:    s_and_b64 s[8:9], s[6:7], exec +; ISA-NEXT:    s_or_b64 s[4:5], s[4:5], s[8:9] +; ISA-NEXT:    s_branch .LBB1_1 +; ISA-NEXT:  .LBB1_3: ; Inline asm indirect target +; ISA-NEXT:    ; %BB2 +; ISA-NEXT:    ; in Loop: Header=BB1_1 Depth=1 +; ISA-NEXT:    ; Label of block must be emitted +; ISA-NEXT:    ;;#ASMSTART +; ISA-NEXT:    ;;#ASMEND +; ISA-NEXT:    s_mov_b64 s[6:7], -1 +; ISA-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5] +; ISA-NEXT:    s_cbranch_execz .LBB1_5 +; ISA-NEXT:  ; %bb.4: ; %TransitionBlock.target.BB3 +; ISA-NEXT:    ; in Loop: Header=BB1_1 Depth=1 +; ISA-NEXT:    s_xor_b64 s[6:7], exec, -1 +; ISA-NEXT:  .LBB1_5: ; %loop.exit.guard +; ISA-NEXT:    ; in Loop: Header=BB1_1 Depth=1 +; ISA-NEXT:    s_or_b64 exec, exec, s[8:9] +; ISA-NEXT:    s_and_b64 vcc, exec, s[6:7] +; ISA-NEXT:    s_mov_b64 s[6:7], 0 +; ISA-NEXT:    s_cbranch_vccz .LBB1_2 +; ISA-NEXT:  ; %bb.6: ; %DummyReturnBlock +; ISA-NEXT:    s_setpc_b64 s[30:31] +BB: +  callbr void asm "", ""() to label %BB1 [] + +BB1: +  callbr void asm "", "r,!i"(i32 %0) to label %BB3 [label %BB2] + +BB2: +  callbr void asm "", ""() to label %BB4 [] + +BB4: +  callbr void asm "", "r,!i"(i32 %1) to label %BB3 [label %BB4] + +BB3: +  callbr void asm "", ""() to label %BB1 [] +} diff --git a/llvm/test/CodeGen/AMDGPU/si-unify-exit-multiple-unreachables.ll b/llvm/test/CodeGen/AMDGPU/si-unify-exit-multiple-unreachables.ll index 4cbe682..004c279 100644 --- a/llvm/test/CodeGen/AMDGPU/si-unify-exit-multiple-unreachables.ll +++ b/llvm/test/CodeGen/AMDGPU/si-unify-exit-multiple-unreachables.ll @@ -1,5 +1,5 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -stop-after=amdgpu-unify-divergent-exit-nodes | FileCheck %s --check-prefix=UNIFY +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -stop-after=amdgpu-unify-divergent-exit-nodes | FileCheck %s --check-prefix=UNIFY  ; RUN: llc < %s -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 | FileCheck %s  declare void @llvm.trap() @@ -70,8 +70,33 @@ define amdgpu_kernel void @kernel(i32 %a, ptr addrspace(1) %x, i32 noundef %n) {  ; CHECK-NEXT:    s_mov_b64 s[2:3], -1  ; CHECK-NEXT:    s_trap 2  ; CHECK-NEXT:    s_branch .LBB0_4 - - +; UNIFY-LABEL: @kernel( +; UNIFY-NEXT:  entry: +; UNIFY-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x() +; UNIFY-NEXT:    [[CMP:%.*]] = icmp eq i32 [[N:%.*]], 256 +; UNIFY-NEXT:    br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]] +; UNIFY:       if.then: +; UNIFY-NEXT:    [[CMP1:%.*]] = icmp eq i32 [[A:%.*]], 0 +; UNIFY-NEXT:    br i1 [[CMP1]], label [[IF_END6_SINK_SPLIT:%.*]], label [[COND_FALSE:%.*]] +; UNIFY:      
 cond.false: +; UNIFY-NEXT:    call void @llvm.trap() +; UNIFY-NEXT:    unreachable +; UNIFY:       if.else: +; UNIFY-NEXT:    [[CMP2:%.*]] = icmp ult i32 [[TID]], 10 +; UNIFY-NEXT:    br i1 [[CMP2]], label [[IF_THEN3:%.*]], label [[IF_END6:%.*]] +; UNIFY:       if.then3: +; UNIFY-NEXT:    [[CMP1_I7:%.*]] = icmp eq i32 [[A]], 0 +; UNIFY-NEXT:    br i1 [[CMP1_I7]], label [[IF_END6_SINK_SPLIT]], label [[COND_FALSE_I8:%.*]] +; UNIFY:       cond.false.i8: +; UNIFY-NEXT:    call void @llvm.trap() +; UNIFY-NEXT:    unreachable +; UNIFY:       if.end6.sink.split: +; UNIFY-NEXT:    [[X1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[X:%.*]], i32 [[TID]] +; UNIFY-NEXT:    store i32 [[A]], ptr addrspace(1) [[X1]], align 4 +; UNIFY-NEXT:    br label [[IF_END6]] +; UNIFY:       if.end6: +; UNIFY-NEXT:    ret void +;  entry:    %tid = call i32 @llvm.amdgcn.workitem.id.x()    %cmp = icmp eq i32 %n, 256 @@ -105,5 +130,129 @@ if.end6.sink.split:  if.end6:    ret void  } -;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: -; UNIFY: {{.*}} + +define amdgpu_kernel void @kernel_callbr(i32 %a, ptr addrspace(1) %x, i32 noundef %n) { +; CHECK-LABEL: kernel_callbr: +; CHECK:       ; %bb.0: ; %entry +; CHECK-NEXT:    s_load_dword s1, s[8:9], 0x10 +; CHECK-NEXT:    s_load_dword s0, s[8:9], 0x0 +; CHECK-NEXT:    s_waitcnt lgkmcnt(0) +; CHECK-NEXT:    s_cmpk_eq_i32 s1, 0x100 +; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0 +; CHECK-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[2:3] +; CHECK-NEXT:    ;;#ASMSTART +; CHECK-NEXT:    ;;#ASMEND +; CHECK-NEXT:  ; %bb.1: ; %if.then +; CHECK-NEXT:    s_cmp_eq_u32 s0, 0 +; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0 +; CHECK-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[2:3] +; CHECK-NEXT:    ;;#ASMSTART +; CHECK-NEXT:    ;;#ASMEND +; CHECK-NEXT:  .LBB1_2: ; %if.end6.sink.split +; CHECK-NEXT:    s_load_dwordx2 s[2:3], s[8:9], 0x8 +; CHECK-NEXT:    v_lshlrev_b32_e32 v0, 2, v0 +; CHECK-NEXT:    v_mov_b32_e32 v1, s0 +; CHECK-NEXT:    s_waitcnt lgkmcnt(0) +; CHECK-NEXT:    global_store_dword v0, v1, s[2:3] +; CHECK-NEXT:    ;;#ASMSTART +; CHECK-NEXT:    ;;#ASMEND +; CHECK-NEXT:  .LBB1_3: ; Inline asm indirect target +; CHECK-NEXT:    ; %UnifiedReturnBlock +; CHECK-NEXT:    ; Label of block must be emitted +; CHECK-NEXT:    s_endpgm +; CHECK-NEXT:  .LBB1_4: ; Inline asm indirect target +; CHECK-NEXT:    ; %if.else +; CHECK-NEXT:    ; Label of block must be emitted +; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc, 10, v0 +; CHECK-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc +; CHECK-NEXT:    ;;#ASMSTART +; CHECK-NEXT:    ;;#ASMEND +; CHECK-NEXT:  ; %bb.5: ; %if.then3 +; CHECK-NEXT:    s_cmp_eq_u32 s0, 0 +; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0 +; CHECK-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[2:3] +; CHECK-NEXT:    ;;#ASMSTART +; CHECK-NEXT:    ;;#ASMEND +; CHECK-NEXT:    s_branch .LBB1_2 +; CHECK-NEXT:  .LBB1_6: ; Inline asm indirect target +; CHECK-NEXT:    ; %cond.false.i8 +; CHECK-NEXT:    ; Label of block must be emitted +; CHECK-NEXT:  .LBB1_7: ; Inline asm indirect target +; CHECK-NEXT:    ; %cond.false +; CHECK-NEXT:    ; Label of block must be emitted +; CHECK-NEXT:    s_trap 2 +; CHECK-NEXT:    ; divergent unreachable +; CHECK-NEXT:    s_branch .LBB1_3 +; UNIFY-LABEL: @kernel_callbr( +; UNIFY-NEXT:  entry: +; UNIFY-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x() +; UNIFY-NEXT:    [[CMP:%.*]] = icmp eq i32 [[N:%.*]], 256 +; UNIFY-NEXT:    [[CMP32:%.*]] = zext i1 [[CMP]] to i32 +; UNIFY-NEXT:    callbr void asm "", "r,!i"(i32 
[[CMP32]]) +; UNIFY-NEXT:            to label [[IF_THEN:%.*]] [label %if.else] +; UNIFY:       if.then: +; UNIFY-NEXT:    [[CMP1:%.*]] = icmp eq i32 [[A:%.*]], 0 +; UNIFY-NEXT:    [[CMP1_32:%.*]] = zext i1 [[CMP1]] to i32 +; UNIFY-NEXT:    callbr void asm "", "r,!i"(i32 [[CMP1_32]]) +; UNIFY-NEXT:            to label [[IF_END6_SINK_SPLIT:%.*]] [label %cond.false] +; UNIFY:       cond.false: +; UNIFY-NEXT:    call void @llvm.trap() +; UNIFY-NEXT:    unreachable +; UNIFY:       if.else: +; UNIFY-NEXT:    [[CMP2:%.*]] = icmp ult i32 [[TID]], 10 +; UNIFY-NEXT:    [[CMP2_32:%.*]] = zext i1 [[CMP2]] to i32 +; UNIFY-NEXT:    callbr void asm "", "r,!i"(i32 [[CMP2_32]]) +; UNIFY-NEXT:            to label [[IF_THEN3:%.*]] [label %if.end6] +; UNIFY:       if.then3: +; UNIFY-NEXT:    [[CMP1_I7:%.*]] = icmp eq i32 [[A]], 0 +; UNIFY-NEXT:    [[CMP1_I7_32:%.*]] = zext i1 [[CMP1_I7]] to i32 +; UNIFY-NEXT:    callbr void asm "", "r,!i"(i32 [[CMP1_I7_32]]) +; UNIFY-NEXT:            to label [[IF_END6_SINK_SPLIT]] [label %cond.false.i8] +; UNIFY:       cond.false.i8: +; UNIFY-NEXT:    call void @llvm.trap() +; UNIFY-NEXT:    unreachable +; UNIFY:       if.end6.sink.split: +; UNIFY-NEXT:    [[X1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[X:%.*]], i32 [[TID]] +; UNIFY-NEXT:    store i32 [[A]], ptr addrspace(1) [[X1]], align 4 +; UNIFY-NEXT:    callbr void asm "", ""() +; UNIFY-NEXT:            to label [[IF_END6:%.*]] [] +; UNIFY:       if.end6: +; UNIFY-NEXT:    ret void +; +entry: +  %tid = call i32 @llvm.amdgcn.workitem.id.x() +  %cmp = icmp eq i32 %n, 256 +  %cmp32 = zext i1 %cmp to i32 +  callbr void asm "", "r,!i"(i32 %cmp32) to label %if.then [label %if.else] + +if.then: +  %cmp1 = icmp eq i32 %a, 0 +  %cmp1_32 = zext i1 %cmp1 to i32 +  callbr void asm "", "r,!i"(i32 %cmp1_32) to label %if.end6.sink.split [label %cond.false] + +cond.false: +  call void @llvm.trap() +  unreachable + +if.else: +  %cmp2 = icmp ult i32 %tid, 10 +  %cmp2_32 = zext i1 %cmp2 to i32 +  callbr void asm "", "r,!i"(i32 %cmp2_32) to label %if.then3 [label %if.end6] + +if.then3: +  %cmp1.i7 = icmp eq i32 %a, 0 +  %cmp1.i7_32 = zext i1 %cmp1.i7 to i32 +  callbr void asm "", "r,!i"(i32 %cmp1.i7_32) to label %if.end6.sink.split [label %cond.false.i8] + +cond.false.i8: +  call void @llvm.trap() +  unreachable + +if.end6.sink.split: +  %x1 = getelementptr inbounds i32, ptr addrspace(1) %x, i32 %tid +  store i32 %a, ptr addrspace(1) %x1, align 4 +  callbr void asm "", ""() to label %if.end6 [] + +if.end6: +  ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/update-phi.ll b/llvm/test/CodeGen/AMDGPU/update-phi.ll index 50666be..684dc1a 100644 --- a/llvm/test/CodeGen/AMDGPU/update-phi.ll +++ b/llvm/test/CodeGen/AMDGPU/update-phi.ll @@ -37,3 +37,42 @@ n28:                                               ; preds = %.loopexit, %n28  n31:                                               ; preds =    ret void  } + +define amdgpu_ps void @_amdgpu_ps_main_callbr() local_unnamed_addr #3 { +; IR-LABEL: @_amdgpu_ps_main_callbr( +; IR-NEXT:  .entry: +; IR-NEXT:    callbr void asm "", ""() +; IR-NEXT:            to label [[DOTLOOPEXIT:%.*]] [] +; IR:       .loopexit: +; IR-NEXT:    callbr void asm "", ""() +; IR-NEXT:            to label [[N28:%.*]] [] +; IR:       n28: +; IR-NEXT:    [[DOT01:%.*]] = phi float [ 0.000000e+00, [[DOTLOOPEXIT]] ], [ [[N29:%.*]], [[TRANSITIONBLOCK:%.*]] ] +; IR-NEXT:    [[N29]] = fadd float [[DOT01]], 1.000000e+00 +; IR-NEXT:    [[N30:%.*]] = fcmp ogt float [[N29]], 4.000000e+00 +; IR-NEXT:    [[N30_32:%.*]] = zext 
i1 [[N30]] to i32 +; IR-NEXT:    br i1 true, label [[TRANSITIONBLOCK]], label [[DUMMYRETURNBLOCK:%.*]] +; IR:       TransitionBlock: +; IR-NEXT:    callbr void asm "", "r,!i"(i32 [[N30_32]]) +; IR-NEXT:            to label [[DOTLOOPEXIT]] [label %n28] +; IR:       n31: +; IR-NEXT:    ret void +; IR:       DummyReturnBlock: +; IR-NEXT:    ret void +; +.entry: +  callbr void asm "", ""() to label %.loopexit [] + +.loopexit:                                        ; preds = %n28, %.entry +  callbr void asm "", ""() to label %n28 [] + +n28:                                               ; preds = %.loopexit, %n28 +  %.01 = phi float [ 0.000000e+00, %.loopexit ], [ %n29, %n28 ] +  %n29 = fadd float %.01, 1.0 +  %n30 = fcmp ogt float %n29, 4.000000e+00 +  %n30.32 = zext i1 %n30 to i32 +  callbr void asm "", "r,!i"(i32 %n30.32) to label %.loopexit [label %n28] + +n31:                                               ; preds = +  ret void +} diff --git a/llvm/test/CodeGen/ARM/llvm.sincos.ll b/llvm/test/CodeGen/ARM/llvm.sincos.ll index 9628405..1448fac 100644 --- a/llvm/test/CodeGen/ARM/llvm.sincos.ll +++ b/llvm/test/CodeGen/ARM/llvm.sincos.ll @@ -1,223 +1,1004 @@  ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 -; RUN: llc -mtriple=thumbv7-gnu-linux < %s | FileCheck -check-prefixes=CHECK %s +; RUN: llc -mtriple=thumbv7-gnu-linux < %s | FileCheck -check-prefix=GNU %s +; RUN: llc -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 < %s | FileCheck -check-prefix=GNUEABI %s +; RUN: llc -mtriple=armv7-apple-ios6 -mcpu=cortex-a8 < %s | FileCheck -check-prefixes=IOS,IOS-NO-STRET %s +; RUN: llc -mtriple=armv7-apple-ios7 -mcpu=cortex-a8 < %s | FileCheck -check-prefixes=IOS,IOS-WITH-STRET %s +; RUN: llc -mtriple=thumbv7k-apple-watchos2.0 < %s | FileCheck -check-prefix=WATCHABI %s  define { half, half } @test_sincos_f16(half %a) { -; CHECK-LABEL: test_sincos_f16: -; CHECK:       @ %bb.0: -; CHECK-NEXT:    push {r4, lr} -; CHECK-NEXT:    sub sp, #8 -; CHECK-NEXT:    bl __gnu_h2f_ieee -; CHECK-NEXT:    add r1, sp, #4 -; CHECK-NEXT:    mov r2, sp -; CHECK-NEXT:    bl sincosf -; CHECK-NEXT:    ldr r0, [sp, #4] -; CHECK-NEXT:    bl __gnu_f2h_ieee -; CHECK-NEXT:    mov r4, r0 -; CHECK-NEXT:    ldr r0, [sp] -; CHECK-NEXT:    bl __gnu_f2h_ieee -; CHECK-NEXT:    mov r1, r0 -; CHECK-NEXT:    mov r0, r4 -; CHECK-NEXT:    add sp, #8 -; CHECK-NEXT:    pop {r4, pc} +; GNU-LABEL: test_sincos_f16: +; GNU:       @ %bb.0: +; GNU-NEXT:    push {r4, lr} +; GNU-NEXT:    sub sp, #8 +; GNU-NEXT:    bl __gnu_h2f_ieee +; GNU-NEXT:    add r1, sp, #4 +; GNU-NEXT:    mov r2, sp +; GNU-NEXT:    bl sincosf +; GNU-NEXT:    ldr r0, [sp, #4] +; GNU-NEXT:    bl __gnu_f2h_ieee +; GNU-NEXT:    mov r4, r0 +; GNU-NEXT:    ldr r0, [sp] +; GNU-NEXT:    bl __gnu_f2h_ieee +; GNU-NEXT:    mov r1, r0 +; GNU-NEXT:    mov r0, r4 +; GNU-NEXT:    add sp, #8 +; GNU-NEXT:    pop {r4, pc} +; +; GNUEABI-LABEL: test_sincos_f16: +; GNUEABI:       @ %bb.0: +; GNUEABI-NEXT:    .save {r4, lr} +; GNUEABI-NEXT:    push {r4, lr} +; GNUEABI-NEXT:    .pad #8 +; GNUEABI-NEXT:    sub sp, sp, #8 +; GNUEABI-NEXT:    bl __gnu_h2f_ieee +; GNUEABI-NEXT:    add r1, sp, #4 +; GNUEABI-NEXT:    mov r2, sp +; GNUEABI-NEXT:    bl sincosf +; GNUEABI-NEXT:    ldr r0, [sp, #4] +; GNUEABI-NEXT:    bl __gnu_f2h_ieee +; GNUEABI-NEXT:    mov r4, r0 +; GNUEABI-NEXT:    ldr r0, [sp] +; GNUEABI-NEXT:    bl __gnu_f2h_ieee +; GNUEABI-NEXT:    mov r1, r0 +; GNUEABI-NEXT:    mov r0, r4 +; GNUEABI-NEXT:    add sp, sp, #8 +; GNUEABI-NEXT:    pop {r4, pc} +; 
+; IOS-NO-STRET-LABEL: test_sincos_f16: +; IOS-NO-STRET:       @ %bb.0: +; IOS-NO-STRET-NEXT:    push {r4, r5, lr} +; IOS-NO-STRET-NEXT:    bl ___extendhfsf2 +; IOS-NO-STRET-NEXT:    mov r4, r0 +; IOS-NO-STRET-NEXT:    bl _sinf +; IOS-NO-STRET-NEXT:    bl ___truncsfhf2 +; IOS-NO-STRET-NEXT:    mov r5, r0 +; IOS-NO-STRET-NEXT:    mov r0, r4 +; IOS-NO-STRET-NEXT:    bl _cosf +; IOS-NO-STRET-NEXT:    bl ___truncsfhf2 +; IOS-NO-STRET-NEXT:    mov r1, r0 +; IOS-NO-STRET-NEXT:    mov r0, r5 +; IOS-NO-STRET-NEXT:    pop {r4, r5, pc} +; +; IOS-WITH-STRET-LABEL: test_sincos_f16: +; IOS-WITH-STRET:       @ %bb.0: +; IOS-WITH-STRET-NEXT:    push {r4, r5, lr} +; IOS-WITH-STRET-NEXT:    sub sp, sp, #8 +; IOS-WITH-STRET-NEXT:    bl ___extendhfsf2 +; IOS-WITH-STRET-NEXT:    mov r1, r0 +; IOS-WITH-STRET-NEXT:    mov r0, sp +; IOS-WITH-STRET-NEXT:    bl ___sincosf_stret +; IOS-WITH-STRET-NEXT:    ldm sp, {r0, r4} +; IOS-WITH-STRET-NEXT:    bl ___truncsfhf2 +; IOS-WITH-STRET-NEXT:    mov r5, r0 +; IOS-WITH-STRET-NEXT:    mov r0, r4 +; IOS-WITH-STRET-NEXT:    bl ___truncsfhf2 +; IOS-WITH-STRET-NEXT:    mov r1, r0 +; IOS-WITH-STRET-NEXT:    mov r0, r5 +; IOS-WITH-STRET-NEXT:    add sp, sp, #8 +; IOS-WITH-STRET-NEXT:    pop {r4, r5, pc} +; +; WATCHABI-LABEL: test_sincos_f16: +; WATCHABI:         .cfi_startproc +; WATCHABI-NEXT:  @ %bb.0: +; WATCHABI-NEXT:    push {r7, lr} +; WATCHABI-NEXT:    .cfi_def_cfa_offset 8 +; WATCHABI-NEXT:    .cfi_offset lr, -4 +; WATCHABI-NEXT:    .cfi_offset r7, -8 +; WATCHABI-NEXT:    sub sp, #8 +; WATCHABI-NEXT:    .cfi_def_cfa_offset 16 +; WATCHABI-NEXT:    vcvtb.f32.f16 s0, s0 +; WATCHABI-NEXT:    bl ___sincosf_stret +; WATCHABI-NEXT:    vcvtb.f16.f32 s0, s0 +; WATCHABI-NEXT:    vcvtb.f16.f32 s1, s1 +; WATCHABI-NEXT:    add sp, #8 +; WATCHABI-NEXT:    pop {r7, pc} +; WATCHABI-NEXT:    .cfi_endproc    %result = call { half, half } @llvm.sincos.f16(half %a)    ret { half, half } %result  }  define half @test_sincos_f16_only_use_sin(half %a) { -; CHECK-LABEL: test_sincos_f16_only_use_sin: -; CHECK:       @ %bb.0: -; CHECK-NEXT:    push {r7, lr} -; CHECK-NEXT:    sub sp, #8 -; CHECK-NEXT:    bl __gnu_h2f_ieee -; CHECK-NEXT:    add r1, sp, #4 -; CHECK-NEXT:    mov r2, sp -; CHECK-NEXT:    bl sincosf -; CHECK-NEXT:    ldr r0, [sp, #4] -; CHECK-NEXT:    bl __gnu_f2h_ieee -; CHECK-NEXT:    add sp, #8 -; CHECK-NEXT:    pop {r7, pc} +; GNU-LABEL: test_sincos_f16_only_use_sin: +; GNU:       @ %bb.0: +; GNU-NEXT:    push {r7, lr} +; GNU-NEXT:    sub sp, #8 +; GNU-NEXT:    bl __gnu_h2f_ieee +; GNU-NEXT:    add r1, sp, #4 +; GNU-NEXT:    mov r2, sp +; GNU-NEXT:    bl sincosf +; GNU-NEXT:    ldr r0, [sp, #4] +; GNU-NEXT:    bl __gnu_f2h_ieee +; GNU-NEXT:    add sp, #8 +; GNU-NEXT:    pop {r7, pc} +; +; GNUEABI-LABEL: test_sincos_f16_only_use_sin: +; GNUEABI:       @ %bb.0: +; GNUEABI-NEXT:    .save {r11, lr} +; GNUEABI-NEXT:    push {r11, lr} +; GNUEABI-NEXT:    .pad #8 +; GNUEABI-NEXT:    sub sp, sp, #8 +; GNUEABI-NEXT:    bl __gnu_h2f_ieee +; GNUEABI-NEXT:    add r1, sp, #4 +; GNUEABI-NEXT:    mov r2, sp +; GNUEABI-NEXT:    bl sincosf +; GNUEABI-NEXT:    ldr r0, [sp, #4] +; GNUEABI-NEXT:    bl __gnu_f2h_ieee +; GNUEABI-NEXT:    add sp, sp, #8 +; GNUEABI-NEXT:    pop {r11, pc} +; +; IOS-NO-STRET-LABEL: test_sincos_f16_only_use_sin: +; IOS-NO-STRET:       @ %bb.0: +; IOS-NO-STRET-NEXT:    push {lr} +; IOS-NO-STRET-NEXT:    bl ___extendhfsf2 +; IOS-NO-STRET-NEXT:    bl _sinf +; IOS-NO-STRET-NEXT:    bl ___truncsfhf2 +; IOS-NO-STRET-NEXT:    pop {lr} +; IOS-NO-STRET-NEXT:    bx lr +; +; 
IOS-WITH-STRET-LABEL: test_sincos_f16_only_use_sin: +; IOS-WITH-STRET:       @ %bb.0: +; IOS-WITH-STRET-NEXT:    push {lr} +; IOS-WITH-STRET-NEXT:    sub sp, sp, #8 +; IOS-WITH-STRET-NEXT:    bl ___extendhfsf2 +; IOS-WITH-STRET-NEXT:    mov r1, r0 +; IOS-WITH-STRET-NEXT:    mov r0, sp +; IOS-WITH-STRET-NEXT:    bl ___sincosf_stret +; IOS-WITH-STRET-NEXT:    ldr r0, [sp] +; IOS-WITH-STRET-NEXT:    bl ___truncsfhf2 +; IOS-WITH-STRET-NEXT:    add sp, sp, #8 +; IOS-WITH-STRET-NEXT:    pop {lr} +; IOS-WITH-STRET-NEXT:    bx lr +; +; WATCHABI-LABEL: test_sincos_f16_only_use_sin: +; WATCHABI:         .cfi_startproc +; WATCHABI-NEXT:  @ %bb.0: +; WATCHABI-NEXT:    push {r7, lr} +; WATCHABI-NEXT:    .cfi_def_cfa_offset 8 +; WATCHABI-NEXT:    .cfi_offset lr, -4 +; WATCHABI-NEXT:    .cfi_offset r7, -8 +; WATCHABI-NEXT:    sub sp, #8 +; WATCHABI-NEXT:    .cfi_def_cfa_offset 16 +; WATCHABI-NEXT:    vcvtb.f32.f16 s0, s0 +; WATCHABI-NEXT:    bl ___sincosf_stret +; WATCHABI-NEXT:    vcvtb.f16.f32 s0, s0 +; WATCHABI-NEXT:    add sp, #8 +; WATCHABI-NEXT:    pop {r7, pc} +; WATCHABI-NEXT:    .cfi_endproc    %result = call { half, half } @llvm.sincos.f16(half %a)    %result.0 = extractvalue { half, half } %result, 0    ret half %result.0  }  define half @test_sincos_f16_only_use_cos(half %a) { -; CHECK-LABEL: test_sincos_f16_only_use_cos: -; CHECK:       @ %bb.0: -; CHECK-NEXT:    push {r7, lr} -; CHECK-NEXT:    sub sp, #8 -; CHECK-NEXT:    bl __gnu_h2f_ieee -; CHECK-NEXT:    add r1, sp, #4 -; CHECK-NEXT:    mov r2, sp -; CHECK-NEXT:    bl sincosf -; CHECK-NEXT:    ldr r0, [sp] -; CHECK-NEXT:    bl __gnu_f2h_ieee -; CHECK-NEXT:    add sp, #8 -; CHECK-NEXT:    pop {r7, pc} +; GNU-LABEL: test_sincos_f16_only_use_cos: +; GNU:       @ %bb.0: +; GNU-NEXT:    push {r7, lr} +; GNU-NEXT:    sub sp, #8 +; GNU-NEXT:    bl __gnu_h2f_ieee +; GNU-NEXT:    add r1, sp, #4 +; GNU-NEXT:    mov r2, sp +; GNU-NEXT:    bl sincosf +; GNU-NEXT:    ldr r0, [sp] +; GNU-NEXT:    bl __gnu_f2h_ieee +; GNU-NEXT:    add sp, #8 +; GNU-NEXT:    pop {r7, pc} +; +; GNUEABI-LABEL: test_sincos_f16_only_use_cos: +; GNUEABI:       @ %bb.0: +; GNUEABI-NEXT:    .save {r11, lr} +; GNUEABI-NEXT:    push {r11, lr} +; GNUEABI-NEXT:    .pad #8 +; GNUEABI-NEXT:    sub sp, sp, #8 +; GNUEABI-NEXT:    bl __gnu_h2f_ieee +; GNUEABI-NEXT:    add r1, sp, #4 +; GNUEABI-NEXT:    mov r2, sp +; GNUEABI-NEXT:    bl sincosf +; GNUEABI-NEXT:    ldr r0, [sp] +; GNUEABI-NEXT:    bl __gnu_f2h_ieee +; GNUEABI-NEXT:    add sp, sp, #8 +; GNUEABI-NEXT:    pop {r11, pc} +; +; IOS-NO-STRET-LABEL: test_sincos_f16_only_use_cos: +; IOS-NO-STRET:       @ %bb.0: +; IOS-NO-STRET-NEXT:    push {lr} +; IOS-NO-STRET-NEXT:    bl ___extendhfsf2 +; IOS-NO-STRET-NEXT:    bl _cosf +; IOS-NO-STRET-NEXT:    bl ___truncsfhf2 +; IOS-NO-STRET-NEXT:    pop {lr} +; IOS-NO-STRET-NEXT:    bx lr +; +; IOS-WITH-STRET-LABEL: test_sincos_f16_only_use_cos: +; IOS-WITH-STRET:       @ %bb.0: +; IOS-WITH-STRET-NEXT:    push {lr} +; IOS-WITH-STRET-NEXT:    sub sp, sp, #8 +; IOS-WITH-STRET-NEXT:    bl ___extendhfsf2 +; IOS-WITH-STRET-NEXT:    mov r1, r0 +; IOS-WITH-STRET-NEXT:    mov r0, sp +; IOS-WITH-STRET-NEXT:    bl ___sincosf_stret +; IOS-WITH-STRET-NEXT:    ldr r0, [sp, #4] +; IOS-WITH-STRET-NEXT:    bl ___truncsfhf2 +; IOS-WITH-STRET-NEXT:    add sp, sp, #8 +; IOS-WITH-STRET-NEXT:    pop {lr} +; IOS-WITH-STRET-NEXT:    bx lr +; +; WATCHABI-LABEL: test_sincos_f16_only_use_cos: +; WATCHABI:         .cfi_startproc +; WATCHABI-NEXT:  @ %bb.0: +; WATCHABI-NEXT:    push {r7, lr} +; WATCHABI-NEXT:    
.cfi_def_cfa_offset 8 +; WATCHABI-NEXT:    .cfi_offset lr, -4 +; WATCHABI-NEXT:    .cfi_offset r7, -8 +; WATCHABI-NEXT:    sub sp, #8 +; WATCHABI-NEXT:    .cfi_def_cfa_offset 16 +; WATCHABI-NEXT:    vcvtb.f32.f16 s0, s0 +; WATCHABI-NEXT:    bl ___sincosf_stret +; WATCHABI-NEXT:    vcvtb.f16.f32 s0, s1 +; WATCHABI-NEXT:    add sp, #8 +; WATCHABI-NEXT:    pop {r7, pc} +; WATCHABI-NEXT:    .cfi_endproc    %result = call { half, half } @llvm.sincos.f16(half %a)    %result.1 = extractvalue { half, half } %result, 1    ret half %result.1  }  define { <2 x half>, <2 x half> } @test_sincos_v2f16(<2 x half> %a) { -; CHECK-LABEL: test_sincos_v2f16: -; CHECK:       @ %bb.0: -; CHECK-NEXT:    push {r4, lr} -; CHECK-NEXT:    vpush {d8} -; CHECK-NEXT:    sub sp, #24 -; CHECK-NEXT:    mov r4, r0 -; CHECK-NEXT:    mov r0, r1 -; CHECK-NEXT:    bl __gnu_h2f_ieee -; CHECK-NEXT:    add r1, sp, #12 -; CHECK-NEXT:    add r2, sp, #8 -; CHECK-NEXT:    bl sincosf -; CHECK-NEXT:    mov r0, r4 -; CHECK-NEXT:    bl __gnu_h2f_ieee -; CHECK-NEXT:    add r1, sp, #4 -; CHECK-NEXT:    mov r2, sp -; CHECK-NEXT:    bl sincosf -; CHECK-NEXT:    ldr r0, [sp, #12] -; CHECK-NEXT:    bl __gnu_f2h_ieee -; CHECK-NEXT:    ldr r1, [sp, #4] -; CHECK-NEXT:    strh.w r0, [sp, #22] -; CHECK-NEXT:    mov r0, r1 -; CHECK-NEXT:    bl __gnu_f2h_ieee -; CHECK-NEXT:    strh.w r0, [sp, #20] -; CHECK-NEXT:    add r0, sp, #20 -; CHECK-NEXT:    vld1.32 {d8[0]}, [r0:32] -; CHECK-NEXT:    ldr r0, [sp, #8] -; CHECK-NEXT:    bl __gnu_f2h_ieee -; CHECK-NEXT:    ldr r1, [sp] -; CHECK-NEXT:    strh.w r0, [sp, #18] -; CHECK-NEXT:    mov r0, r1 -; CHECK-NEXT:    bl __gnu_f2h_ieee -; CHECK-NEXT:    strh.w r0, [sp, #16] -; CHECK-NEXT:    add r0, sp, #16 -; CHECK-NEXT:    vmovl.u16 q9, d8 -; CHECK-NEXT:    vld1.32 {d16[0]}, [r0:32] -; CHECK-NEXT:    vmovl.u16 q8, d16 -; CHECK-NEXT:    vmov.32 r0, d18[0] -; CHECK-NEXT:    vmov.32 r1, d18[1] -; CHECK-NEXT:    vmov.32 r2, d16[0] -; CHECK-NEXT:    vmov.32 r3, d16[1] -; CHECK-NEXT:    add sp, #24 -; CHECK-NEXT:    vpop {d8} -; CHECK-NEXT:    pop {r4, pc} +; GNU-LABEL: test_sincos_v2f16: +; GNU:       @ %bb.0: +; GNU-NEXT:    push {r4, lr} +; GNU-NEXT:    vpush {d8} +; GNU-NEXT:    sub sp, #24 +; GNU-NEXT:    mov r4, r0 +; GNU-NEXT:    mov r0, r1 +; GNU-NEXT:    bl __gnu_h2f_ieee +; GNU-NEXT:    add r1, sp, #12 +; GNU-NEXT:    add r2, sp, #8 +; GNU-NEXT:    bl sincosf +; GNU-NEXT:    mov r0, r4 +; GNU-NEXT:    bl __gnu_h2f_ieee +; GNU-NEXT:    add r1, sp, #4 +; GNU-NEXT:    mov r2, sp +; GNU-NEXT:    bl sincosf +; GNU-NEXT:    ldr r0, [sp, #12] +; GNU-NEXT:    bl __gnu_f2h_ieee +; GNU-NEXT:    ldr r1, [sp, #4] +; GNU-NEXT:    strh.w r0, [sp, #22] +; GNU-NEXT:    mov r0, r1 +; GNU-NEXT:    bl __gnu_f2h_ieee +; GNU-NEXT:    strh.w r0, [sp, #20] +; GNU-NEXT:    add r0, sp, #20 +; GNU-NEXT:    vld1.32 {d8[0]}, [r0:32] +; GNU-NEXT:    ldr r0, [sp, #8] +; GNU-NEXT:    bl __gnu_f2h_ieee +; GNU-NEXT:    ldr r1, [sp] +; GNU-NEXT:    strh.w r0, [sp, #18] +; GNU-NEXT:    mov r0, r1 +; GNU-NEXT:    bl __gnu_f2h_ieee +; GNU-NEXT:    strh.w r0, [sp, #16] +; GNU-NEXT:    add r0, sp, #16 +; GNU-NEXT:    vmovl.u16 q9, d8 +; GNU-NEXT:    vld1.32 {d16[0]}, [r0:32] +; GNU-NEXT:    vmovl.u16 q8, d16 +; GNU-NEXT:    vmov.32 r0, d18[0] +; GNU-NEXT:    vmov.32 r1, d18[1] +; GNU-NEXT:    vmov.32 r2, d16[0] +; GNU-NEXT:    vmov.32 r3, d16[1] +; GNU-NEXT:    add sp, #24 +; GNU-NEXT:    vpop {d8} +; GNU-NEXT:    pop {r4, pc} +; +; GNUEABI-LABEL: test_sincos_v2f16: +; GNUEABI:       @ %bb.0: +; GNUEABI-NEXT:    .save {r4, lr} +; GNUEABI-NEXT: 
   push {r4, lr} +; GNUEABI-NEXT:    .vsave {d8} +; GNUEABI-NEXT:    vpush {d8} +; GNUEABI-NEXT:    .pad #24 +; GNUEABI-NEXT:    sub sp, sp, #24 +; GNUEABI-NEXT:    mov r4, r0 +; GNUEABI-NEXT:    mov r0, r1 +; GNUEABI-NEXT:    bl __gnu_h2f_ieee +; GNUEABI-NEXT:    add r1, sp, #12 +; GNUEABI-NEXT:    add r2, sp, #8 +; GNUEABI-NEXT:    bl sincosf +; GNUEABI-NEXT:    mov r0, r4 +; GNUEABI-NEXT:    bl __gnu_h2f_ieee +; GNUEABI-NEXT:    add r1, sp, #4 +; GNUEABI-NEXT:    mov r2, sp +; GNUEABI-NEXT:    bl sincosf +; GNUEABI-NEXT:    ldr r0, [sp, #12] +; GNUEABI-NEXT:    bl __gnu_f2h_ieee +; GNUEABI-NEXT:    ldr r1, [sp, #4] +; GNUEABI-NEXT:    strh r0, [sp, #22] +; GNUEABI-NEXT:    mov r0, r1 +; GNUEABI-NEXT:    bl __gnu_f2h_ieee +; GNUEABI-NEXT:    strh r0, [sp, #20] +; GNUEABI-NEXT:    add r0, sp, #20 +; GNUEABI-NEXT:    vld1.32 {d8[0]}, [r0:32] +; GNUEABI-NEXT:    ldr r0, [sp, #8] +; GNUEABI-NEXT:    bl __gnu_f2h_ieee +; GNUEABI-NEXT:    ldr r1, [sp] +; GNUEABI-NEXT:    strh r0, [sp, #18] +; GNUEABI-NEXT:    mov r0, r1 +; GNUEABI-NEXT:    bl __gnu_f2h_ieee +; GNUEABI-NEXT:    strh r0, [sp, #16] +; GNUEABI-NEXT:    add r0, sp, #16 +; GNUEABI-NEXT:    vmovl.u16 q9, d8 +; GNUEABI-NEXT:    vld1.32 {d16[0]}, [r0:32] +; GNUEABI-NEXT:    vmovl.u16 q8, d16 +; GNUEABI-NEXT:    vmov.32 r0, d18[0] +; GNUEABI-NEXT:    vmov.32 r1, d18[1] +; GNUEABI-NEXT:    vmov.32 r2, d16[0] +; GNUEABI-NEXT:    vmov.32 r3, d16[1] +; GNUEABI-NEXT:    add sp, sp, #24 +; GNUEABI-NEXT:    vpop {d8} +; GNUEABI-NEXT:    pop {r4, pc} +; +; IOS-NO-STRET-LABEL: test_sincos_v2f16: +; IOS-NO-STRET:       @ %bb.0: +; IOS-NO-STRET-NEXT:    push {r4, r5, lr} +; IOS-NO-STRET-NEXT:    vpush {d8} +; IOS-NO-STRET-NEXT:    sub sp, sp, #8 +; IOS-NO-STRET-NEXT:    mov r5, r0 +; IOS-NO-STRET-NEXT:    mov r0, r1 +; IOS-NO-STRET-NEXT:    bl ___extendhfsf2 +; IOS-NO-STRET-NEXT:    mov r4, r0 +; IOS-NO-STRET-NEXT:    bl _sinf +; IOS-NO-STRET-NEXT:    bl ___truncsfhf2 +; IOS-NO-STRET-NEXT:    strh r0, [sp, #6] +; IOS-NO-STRET-NEXT:    mov r0, r5 +; IOS-NO-STRET-NEXT:    bl ___extendhfsf2 +; IOS-NO-STRET-NEXT:    mov r5, r0 +; IOS-NO-STRET-NEXT:    bl _sinf +; IOS-NO-STRET-NEXT:    bl ___truncsfhf2 +; IOS-NO-STRET-NEXT:    strh r0, [sp, #4] +; IOS-NO-STRET-NEXT:    add r0, sp, #4 +; IOS-NO-STRET-NEXT:    vld1.32 {d8[0]}, [r0:32] +; IOS-NO-STRET-NEXT:    mov r0, r4 +; IOS-NO-STRET-NEXT:    bl _cosf +; IOS-NO-STRET-NEXT:    bl ___truncsfhf2 +; IOS-NO-STRET-NEXT:    strh r0, [sp, #2] +; IOS-NO-STRET-NEXT:    mov r0, r5 +; IOS-NO-STRET-NEXT:    bl _cosf +; IOS-NO-STRET-NEXT:    bl ___truncsfhf2 +; IOS-NO-STRET-NEXT:    strh r0, [sp] +; IOS-NO-STRET-NEXT:    mov r0, sp +; IOS-NO-STRET-NEXT:    vld1.32 {d16[0]}, [r0:32] +; IOS-NO-STRET-NEXT:    vmovl.u16 q9, d8 +; IOS-NO-STRET-NEXT:    vmovl.u16 q8, d16 +; IOS-NO-STRET-NEXT:    vmov.32 r0, d18[0] +; IOS-NO-STRET-NEXT:    vmov.32 r1, d18[1] +; IOS-NO-STRET-NEXT:    vmov.32 r2, d16[0] +; IOS-NO-STRET-NEXT:    vmov.32 r3, d16[1] +; IOS-NO-STRET-NEXT:    add sp, sp, #8 +; IOS-NO-STRET-NEXT:    vpop {d8} +; IOS-NO-STRET-NEXT:    pop {r4, r5, pc} +; +; IOS-WITH-STRET-LABEL: test_sincos_v2f16: +; IOS-WITH-STRET:       @ %bb.0: +; IOS-WITH-STRET-NEXT:    push {r4, r5, lr} +; IOS-WITH-STRET-NEXT:    vpush {d8} +; IOS-WITH-STRET-NEXT:    sub sp, sp, #24 +; IOS-WITH-STRET-NEXT:    mov r4, r0 +; IOS-WITH-STRET-NEXT:    mov r0, r1 +; IOS-WITH-STRET-NEXT:    bl ___extendhfsf2 +; IOS-WITH-STRET-NEXT:    mov r1, r0 +; IOS-WITH-STRET-NEXT:    add r0, sp, #8 +; IOS-WITH-STRET-NEXT:    bl ___sincosf_stret +; 
IOS-WITH-STRET-NEXT:    mov r0, r4 +; IOS-WITH-STRET-NEXT:    bl ___extendhfsf2 +; IOS-WITH-STRET-NEXT:    mov r1, r0 +; IOS-WITH-STRET-NEXT:    mov r0, sp +; IOS-WITH-STRET-NEXT:    bl ___sincosf_stret +; IOS-WITH-STRET-NEXT:    ldr r0, [sp, #8] +; IOS-WITH-STRET-NEXT:    ldr r4, [sp, #12] +; IOS-WITH-STRET-NEXT:    bl ___truncsfhf2 +; IOS-WITH-STRET-NEXT:    ldm sp, {r1, r5} +; IOS-WITH-STRET-NEXT:    strh r0, [sp, #22] +; IOS-WITH-STRET-NEXT:    mov r0, r1 +; IOS-WITH-STRET-NEXT:    bl ___truncsfhf2 +; IOS-WITH-STRET-NEXT:    strh r0, [sp, #20] +; IOS-WITH-STRET-NEXT:    add r0, sp, #20 +; IOS-WITH-STRET-NEXT:    vld1.32 {d8[0]}, [r0:32] +; IOS-WITH-STRET-NEXT:    mov r0, r4 +; IOS-WITH-STRET-NEXT:    bl ___truncsfhf2 +; IOS-WITH-STRET-NEXT:    strh r0, [sp, #18] +; IOS-WITH-STRET-NEXT:    mov r0, r5 +; IOS-WITH-STRET-NEXT:    bl ___truncsfhf2 +; IOS-WITH-STRET-NEXT:    strh r0, [sp, #16] +; IOS-WITH-STRET-NEXT:    add r0, sp, #16 +; IOS-WITH-STRET-NEXT:    vmovl.u16 q9, d8 +; IOS-WITH-STRET-NEXT:    vld1.32 {d16[0]}, [r0:32] +; IOS-WITH-STRET-NEXT:    vmovl.u16 q8, d16 +; IOS-WITH-STRET-NEXT:    vmov.32 r0, d18[0] +; IOS-WITH-STRET-NEXT:    vmov.32 r1, d18[1] +; IOS-WITH-STRET-NEXT:    vmov.32 r2, d16[0] +; IOS-WITH-STRET-NEXT:    vmov.32 r3, d16[1] +; IOS-WITH-STRET-NEXT:    add sp, sp, #24 +; IOS-WITH-STRET-NEXT:    vpop {d8} +; IOS-WITH-STRET-NEXT:    pop {r4, r5, pc} +; +; WATCHABI-LABEL: test_sincos_v2f16: +; WATCHABI:         .cfi_startproc +; WATCHABI-NEXT:  @ %bb.0: +; WATCHABI-NEXT:    push {r7, lr} +; WATCHABI-NEXT:    .cfi_def_cfa_offset 8 +; WATCHABI-NEXT:    .cfi_offset lr, -4 +; WATCHABI-NEXT:    .cfi_offset r7, -8 +; WATCHABI-NEXT:    vpush {d10} +; WATCHABI-NEXT:    .cfi_def_cfa_offset 16 +; WATCHABI-NEXT:    vpush {d8} +; WATCHABI-NEXT:    .cfi_def_cfa_offset 24 +; WATCHABI-NEXT:    .cfi_offset d10, -16 +; WATCHABI-NEXT:    .cfi_offset d8, -24 +; WATCHABI-NEXT:    sub sp, #8 +; WATCHABI-NEXT:    .cfi_def_cfa_offset 32 +; WATCHABI-NEXT:    vmov.f32 s16, s0 +; WATCHABI-NEXT:    vcvtb.f32.f16 s0, s1 +; WATCHABI-NEXT:    bl ___sincosf_stret +; WATCHABI-NEXT:    vcvtb.f16.f32 s0, s0 +; WATCHABI-NEXT:    vcvtb.f32.f16 s4, s16 +; WATCHABI-NEXT:    vmov r0, s0 +; WATCHABI-NEXT:    vmov.f32 s0, s4 +; WATCHABI-NEXT:    vmov.f32 s20, s1 +; WATCHABI-NEXT:    strh.w r0, [sp, #6] +; WATCHABI-NEXT:    bl ___sincosf_stret +; WATCHABI-NEXT:    vcvtb.f16.f32 s0, s0 +; WATCHABI-NEXT:    vmov r0, s0 +; WATCHABI-NEXT:    vcvtb.f16.f32 s0, s20 +; WATCHABI-NEXT:    strh.w r0, [sp, #4] +; WATCHABI-NEXT:    add r0, sp, #4 +; WATCHABI-NEXT:    vld1.32 {d16[0]}, [r0:32] +; WATCHABI-NEXT:    vmov r0, s0 +; WATCHABI-NEXT:    vcvtb.f16.f32 s0, s1 +; WATCHABI-NEXT:    strh.w r0, [sp, #2] +; WATCHABI-NEXT:    vmov r0, s0 +; WATCHABI-NEXT:    vmovl.u16 q0, d16 +; WATCHABI-NEXT:    strh.w r0, [sp] +; WATCHABI-NEXT:    mov r0, sp +; WATCHABI-NEXT:    vld1.32 {d18[0]}, [r0:32] +; WATCHABI-NEXT:    vmovl.u16 q1, d18 +; WATCHABI-NEXT:    vmov.f32 s2, s4 +; WATCHABI-NEXT:    vmov.f32 s3, s5 +; WATCHABI-NEXT:    add sp, #8 +; WATCHABI-NEXT:    vpop {d8} +; WATCHABI-NEXT:    vpop {d10} +; WATCHABI-NEXT:    pop {r7, pc} +; WATCHABI-NEXT:    .cfi_endproc    %result = call { <2 x half>, <2 x half> } @llvm.sincos.v2f16(<2 x half> %a)    ret { <2 x half>, <2 x half> } %result  }  define { float, float } @test_sincos_f32(float %a) { -; CHECK-LABEL: test_sincos_f32: -; CHECK:       @ %bb.0: -; CHECK-NEXT:    push {r7, lr} -; CHECK-NEXT:    sub sp, #8 -; CHECK-NEXT:    add r1, sp, #4 -; CHECK-NEXT:    mov r2, sp -; 
CHECK-NEXT:    bl sincosf -; CHECK-NEXT:    ldrd r1, r0, [sp], #8 -; CHECK-NEXT:    pop {r7, pc} +; GNU-LABEL: test_sincos_f32: +; GNU:       @ %bb.0: +; GNU-NEXT:    push {r7, lr} +; GNU-NEXT:    sub sp, #8 +; GNU-NEXT:    add r1, sp, #4 +; GNU-NEXT:    mov r2, sp +; GNU-NEXT:    bl sincosf +; GNU-NEXT:    ldrd r1, r0, [sp], #8 +; GNU-NEXT:    pop {r7, pc} +; +; GNUEABI-LABEL: test_sincos_f32: +; GNUEABI:       @ %bb.0: +; GNUEABI-NEXT:    .save {r11, lr} +; GNUEABI-NEXT:    push {r11, lr} +; GNUEABI-NEXT:    .pad #8 +; GNUEABI-NEXT:    sub sp, sp, #8 +; GNUEABI-NEXT:    add r1, sp, #4 +; GNUEABI-NEXT:    mov r2, sp +; GNUEABI-NEXT:    bl sincosf +; GNUEABI-NEXT:    ldr r0, [sp, #4] +; GNUEABI-NEXT:    ldr r1, [sp], #8 +; GNUEABI-NEXT:    pop {r11, pc} +; +; IOS-NO-STRET-LABEL: test_sincos_f32: +; IOS-NO-STRET:       @ %bb.0: +; IOS-NO-STRET-NEXT:    push {r4, r5, lr} +; IOS-NO-STRET-NEXT:    mov r4, r0 +; IOS-NO-STRET-NEXT:    bl _sinf +; IOS-NO-STRET-NEXT:    mov r5, r0 +; IOS-NO-STRET-NEXT:    mov r0, r4 +; IOS-NO-STRET-NEXT:    bl _cosf +; IOS-NO-STRET-NEXT:    mov r1, r0 +; IOS-NO-STRET-NEXT:    mov r0, r5 +; IOS-NO-STRET-NEXT:    pop {r4, r5, pc} +; +; IOS-WITH-STRET-LABEL: test_sincos_f32: +; IOS-WITH-STRET:       @ %bb.0: +; IOS-WITH-STRET-NEXT:    push {lr} +; IOS-WITH-STRET-NEXT:    sub sp, sp, #8 +; IOS-WITH-STRET-NEXT:    mov r1, r0 +; IOS-WITH-STRET-NEXT:    mov r0, sp +; IOS-WITH-STRET-NEXT:    bl ___sincosf_stret +; IOS-WITH-STRET-NEXT:    pop {r0, r1} +; IOS-WITH-STRET-NEXT:    pop {lr} +; IOS-WITH-STRET-NEXT:    bx lr +; +; WATCHABI-LABEL: test_sincos_f32: +; WATCHABI:         .cfi_startproc +; WATCHABI-NEXT:  @ %bb.0: +; WATCHABI-NEXT:    push {r7, lr} +; WATCHABI-NEXT:    .cfi_def_cfa_offset 8 +; WATCHABI-NEXT:    .cfi_offset lr, -4 +; WATCHABI-NEXT:    .cfi_offset r7, -8 +; WATCHABI-NEXT:    sub sp, #8 +; WATCHABI-NEXT:    .cfi_def_cfa_offset 16 +; WATCHABI-NEXT:    bl ___sincosf_stret +; WATCHABI-NEXT:    add sp, #8 +; WATCHABI-NEXT:    pop {r7, pc} +; WATCHABI-NEXT:    .cfi_endproc    %result = call { float, float } @llvm.sincos.f32(float %a)    ret { float, float } %result  }  define { <2 x float>, <2 x float> } @test_sincos_v2f32(<2 x float> %a) { -; CHECK-LABEL: test_sincos_v2f32: -; CHECK:       @ %bb.0: -; CHECK-NEXT:    push {r7, lr} -; CHECK-NEXT:    vpush {d8} -; CHECK-NEXT:    sub sp, #16 -; CHECK-NEXT:    vmov d8, r0, r1 -; CHECK-NEXT:    add r1, sp, #4 -; CHECK-NEXT:    mov r2, sp -; CHECK-NEXT:    vmov r0, s17 -; CHECK-NEXT:    bl sincosf -; CHECK-NEXT:    vmov r0, s16 -; CHECK-NEXT:    add r1, sp, #12 -; CHECK-NEXT:    add r2, sp, #8 -; CHECK-NEXT:    bl sincosf -; CHECK-NEXT:    vldr s1, [sp, #4] -; CHECK-NEXT:    vldr s3, [sp] -; CHECK-NEXT:    vldr s0, [sp, #12] -; CHECK-NEXT:    vldr s2, [sp, #8] -; CHECK-NEXT:    vmov r0, r1, d0 -; CHECK-NEXT:    vmov r2, r3, d1 -; CHECK-NEXT:    add sp, #16 -; CHECK-NEXT:    vpop {d8} -; CHECK-NEXT:    pop {r7, pc} +; GNU-LABEL: test_sincos_v2f32: +; GNU:       @ %bb.0: +; GNU-NEXT:    push {r7, lr} +; GNU-NEXT:    vpush {d8} +; GNU-NEXT:    sub sp, #16 +; GNU-NEXT:    vmov d8, r0, r1 +; GNU-NEXT:    add r1, sp, #4 +; GNU-NEXT:    mov r2, sp +; GNU-NEXT:    vmov r0, s17 +; GNU-NEXT:    bl sincosf +; GNU-NEXT:    vmov r0, s16 +; GNU-NEXT:    add r1, sp, #12 +; GNU-NEXT:    add r2, sp, #8 +; GNU-NEXT:    bl sincosf +; GNU-NEXT:    vldr s1, [sp, #4] +; GNU-NEXT:    vldr s3, [sp] +; GNU-NEXT:    vldr s0, [sp, #12] +; GNU-NEXT:    vldr s2, [sp, #8] +; GNU-NEXT:    vmov r0, r1, d0 +; GNU-NEXT:    vmov r2, r3, d1 +; 
GNU-NEXT:    add sp, #16 +; GNU-NEXT:    vpop {d8} +; GNU-NEXT:    pop {r7, pc} +; +; GNUEABI-LABEL: test_sincos_v2f32: +; GNUEABI:       @ %bb.0: +; GNUEABI-NEXT:    .save {r11, lr} +; GNUEABI-NEXT:    push {r11, lr} +; GNUEABI-NEXT:    .vsave {d8} +; GNUEABI-NEXT:    vpush {d8} +; GNUEABI-NEXT:    .pad #16 +; GNUEABI-NEXT:    sub sp, sp, #16 +; GNUEABI-NEXT:    vmov d8, r0, r1 +; GNUEABI-NEXT:    add r1, sp, #4 +; GNUEABI-NEXT:    mov r2, sp +; GNUEABI-NEXT:    vmov r0, s17 +; GNUEABI-NEXT:    bl sincosf +; GNUEABI-NEXT:    vmov r0, s16 +; GNUEABI-NEXT:    add r1, sp, #12 +; GNUEABI-NEXT:    add r2, sp, #8 +; GNUEABI-NEXT:    bl sincosf +; GNUEABI-NEXT:    vldr s1, [sp, #4] +; GNUEABI-NEXT:    vldr s3, [sp] +; GNUEABI-NEXT:    vldr s0, [sp, #12] +; GNUEABI-NEXT:    vldr s2, [sp, #8] +; GNUEABI-NEXT:    vmov r0, r1, d0 +; GNUEABI-NEXT:    vmov r2, r3, d1 +; GNUEABI-NEXT:    add sp, sp, #16 +; GNUEABI-NEXT:    vpop {d8} +; GNUEABI-NEXT:    pop {r11, pc} +; +; IOS-NO-STRET-LABEL: test_sincos_v2f32: +; IOS-NO-STRET:       @ %bb.0: +; IOS-NO-STRET-NEXT:    push {r4, r5, r6, r7, lr} +; IOS-NO-STRET-NEXT:    vpush {d8} +; IOS-NO-STRET-NEXT:    vmov d8, r0, r1 +; IOS-NO-STRET-NEXT:    vmov r4, s17 +; IOS-NO-STRET-NEXT:    mov r0, r4 +; IOS-NO-STRET-NEXT:    bl _sinf +; IOS-NO-STRET-NEXT:    mov r5, r0 +; IOS-NO-STRET-NEXT:    mov r0, r4 +; IOS-NO-STRET-NEXT:    bl _cosf +; IOS-NO-STRET-NEXT:    vmov r6, s16 +; IOS-NO-STRET-NEXT:    mov r4, r0 +; IOS-NO-STRET-NEXT:    mov r0, r6 +; IOS-NO-STRET-NEXT:    bl _sinf +; IOS-NO-STRET-NEXT:    mov r7, r0 +; IOS-NO-STRET-NEXT:    mov r0, r6 +; IOS-NO-STRET-NEXT:    bl _cosf +; IOS-NO-STRET-NEXT:    mov r2, r0 +; IOS-NO-STRET-NEXT:    mov r0, r7 +; IOS-NO-STRET-NEXT:    mov r1, r5 +; IOS-NO-STRET-NEXT:    mov r3, r4 +; IOS-NO-STRET-NEXT:    vpop {d8} +; IOS-NO-STRET-NEXT:    pop {r4, r5, r6, r7, pc} +; +; IOS-WITH-STRET-LABEL: test_sincos_v2f32: +; IOS-WITH-STRET:       @ %bb.0: +; IOS-WITH-STRET-NEXT:    push {lr} +; IOS-WITH-STRET-NEXT:    vpush {d8} +; IOS-WITH-STRET-NEXT:    sub sp, sp, #16 +; IOS-WITH-STRET-NEXT:    vmov d8, r0, r1 +; IOS-WITH-STRET-NEXT:    mov r0, sp +; IOS-WITH-STRET-NEXT:    vmov r1, s17 +; IOS-WITH-STRET-NEXT:    bl ___sincosf_stret +; IOS-WITH-STRET-NEXT:    vmov r1, s16 +; IOS-WITH-STRET-NEXT:    add r0, sp, #8 +; IOS-WITH-STRET-NEXT:    bl ___sincosf_stret +; IOS-WITH-STRET-NEXT:    vldr s1, [sp] +; IOS-WITH-STRET-NEXT:    vldr s3, [sp, #4] +; IOS-WITH-STRET-NEXT:    vldr s0, [sp, #8] +; IOS-WITH-STRET-NEXT:    vldr s2, [sp, #12] +; IOS-WITH-STRET-NEXT:    vmov r0, r1, d0 +; IOS-WITH-STRET-NEXT:    vmov r2, r3, d1 +; IOS-WITH-STRET-NEXT:    add sp, sp, #16 +; IOS-WITH-STRET-NEXT:    vpop {d8} +; IOS-WITH-STRET-NEXT:    pop {lr} +; IOS-WITH-STRET-NEXT:    bx lr +; +; WATCHABI-LABEL: test_sincos_v2f32: +; WATCHABI:         .cfi_startproc +; WATCHABI-NEXT:  @ %bb.0: +; WATCHABI-NEXT:    push {r7, lr} +; WATCHABI-NEXT:    .cfi_def_cfa_offset 8 +; WATCHABI-NEXT:    .cfi_offset lr, -4 +; WATCHABI-NEXT:    .cfi_offset r7, -8 +; WATCHABI-NEXT:    vpush {d8, d9, d10} +; WATCHABI-NEXT:    .cfi_def_cfa_offset 32 +; WATCHABI-NEXT:    .cfi_offset d10, -16 +; WATCHABI-NEXT:    .cfi_offset d9, -24 +; WATCHABI-NEXT:    .cfi_offset d8, -32 +; WATCHABI-NEXT:    vmov.f64 d8, d0 +; WATCHABI-NEXT:    vmov.f32 s0, s17 +; WATCHABI-NEXT:    bl ___sincosf_stret +; WATCHABI-NEXT:    vmov.f32 s19, s0 +; WATCHABI-NEXT:    vmov.f32 s0, s16 +; WATCHABI-NEXT:    vmov.f32 s21, s1 +; WATCHABI-NEXT:    bl ___sincosf_stret +; WATCHABI-NEXT:    vmov.f32 s20, s1 
+; WATCHABI-NEXT:    vmov.f32 s18, s0 +; WATCHABI-NEXT:    vmov.f64 d1, d10 +; WATCHABI-NEXT:    vmov.f64 d0, d9 +; WATCHABI-NEXT:    vpop {d8, d9, d10} +; WATCHABI-NEXT:    pop {r7, pc} +; WATCHABI-NEXT:    .cfi_endproc    %result = call { <2 x float>, <2 x float> } @llvm.sincos.v2f32(<2 x float> %a)    ret { <2 x float>, <2 x float> } %result  }  define { double, double } @test_sincos_f64(double %a) { -; CHECK-LABEL: test_sincos_f64: -; CHECK:       @ %bb.0: -; CHECK-NEXT:    push {r7, lr} -; CHECK-NEXT:    sub sp, #16 -; CHECK-NEXT:    add r2, sp, #8 -; CHECK-NEXT:    mov r3, sp -; CHECK-NEXT:    bl sincos -; CHECK-NEXT:    ldrd r0, r1, [sp, #8] -; CHECK-NEXT:    ldrd r2, r3, [sp], #16 -; CHECK-NEXT:    pop {r7, pc} +; GNU-LABEL: test_sincos_f64: +; GNU:       @ %bb.0: +; GNU-NEXT:    push {r7, lr} +; GNU-NEXT:    sub sp, #16 +; GNU-NEXT:    add r2, sp, #8 +; GNU-NEXT:    mov r3, sp +; GNU-NEXT:    bl sincos +; GNU-NEXT:    ldrd r0, r1, [sp, #8] +; GNU-NEXT:    ldrd r2, r3, [sp], #16 +; GNU-NEXT:    pop {r7, pc} +; +; GNUEABI-LABEL: test_sincos_f64: +; GNUEABI:       @ %bb.0: +; GNUEABI-NEXT:    .save {r11, lr} +; GNUEABI-NEXT:    push {r11, lr} +; GNUEABI-NEXT:    .pad #16 +; GNUEABI-NEXT:    sub sp, sp, #16 +; GNUEABI-NEXT:    add r2, sp, #8 +; GNUEABI-NEXT:    mov r3, sp +; GNUEABI-NEXT:    bl sincos +; GNUEABI-NEXT:    ldm sp, {r2, r3} +; GNUEABI-NEXT:    ldr r0, [sp, #8] +; GNUEABI-NEXT:    ldr r1, [sp, #12] +; GNUEABI-NEXT:    add sp, sp, #16 +; GNUEABI-NEXT:    pop {r11, pc} +; +; IOS-NO-STRET-LABEL: test_sincos_f64: +; IOS-NO-STRET:       @ %bb.0: +; IOS-NO-STRET-NEXT:    push {r4, r5, r6, r7, lr} +; IOS-NO-STRET-NEXT:    mov r4, r1 +; IOS-NO-STRET-NEXT:    mov r5, r0 +; IOS-NO-STRET-NEXT:    bl _sin +; IOS-NO-STRET-NEXT:    mov r6, r0 +; IOS-NO-STRET-NEXT:    mov r7, r1 +; IOS-NO-STRET-NEXT:    mov r0, r5 +; IOS-NO-STRET-NEXT:    mov r1, r4 +; IOS-NO-STRET-NEXT:    bl _cos +; IOS-NO-STRET-NEXT:    mov r2, r0 +; IOS-NO-STRET-NEXT:    mov r3, r1 +; IOS-NO-STRET-NEXT:    mov r0, r6 +; IOS-NO-STRET-NEXT:    mov r1, r7 +; IOS-NO-STRET-NEXT:    pop {r4, r5, r6, r7, pc} +; +; IOS-WITH-STRET-LABEL: test_sincos_f64: +; IOS-WITH-STRET:       @ %bb.0: +; IOS-WITH-STRET-NEXT:    push {lr} +; IOS-WITH-STRET-NEXT:    sub sp, sp, #16 +; IOS-WITH-STRET-NEXT:    mov r2, r1 +; IOS-WITH-STRET-NEXT:    mov r1, r0 +; IOS-WITH-STRET-NEXT:    mov r0, sp +; IOS-WITH-STRET-NEXT:    bl ___sincos_stret +; IOS-WITH-STRET-NEXT:    vldr d16, [sp, #8] +; IOS-WITH-STRET-NEXT:    ldm sp, {r0, r1} +; IOS-WITH-STRET-NEXT:    vmov r2, r3, d16 +; IOS-WITH-STRET-NEXT:    add sp, sp, #16 +; IOS-WITH-STRET-NEXT:    pop {lr} +; IOS-WITH-STRET-NEXT:    bx lr +; +; WATCHABI-LABEL: test_sincos_f64: +; WATCHABI:         .cfi_startproc +; WATCHABI-NEXT:  @ %bb.0: +; WATCHABI-NEXT:    push {r7, lr} +; WATCHABI-NEXT:    .cfi_def_cfa_offset 8 +; WATCHABI-NEXT:    .cfi_offset lr, -4 +; WATCHABI-NEXT:    .cfi_offset r7, -8 +; WATCHABI-NEXT:    sub sp, #8 +; WATCHABI-NEXT:    .cfi_def_cfa_offset 16 +; WATCHABI-NEXT:    bl ___sincos_stret +; WATCHABI-NEXT:    add sp, #8 +; WATCHABI-NEXT:    pop {r7, pc} +; WATCHABI-NEXT:    .cfi_endproc    %result = call { double, double } @llvm.sincos.f64(double %a)    ret { double, double } %result  }  define { <2 x double>, <2 x double> } @test_sincos_v2f64(<2 x double> %a) { -; CHECK-LABEL: test_sincos_v2f64: -; CHECK:       @ %bb.0: -; CHECK-NEXT:    push {r4, lr} -; CHECK-NEXT:    sub sp, #32 -; CHECK-NEXT:    mov r1, r3 -; CHECK-NEXT:    mov r12, r2 -; CHECK-NEXT:    add r2, sp, #24 -; 
CHECK-NEXT:    add r3, sp, #16 -; CHECK-NEXT:    mov r4, r0 -; CHECK-NEXT:    mov r0, r12 -; CHECK-NEXT:    bl sincos -; CHECK-NEXT:    ldrd r0, r1, [sp, #40] -; CHECK-NEXT:    add r2, sp, #8 -; CHECK-NEXT:    mov r3, sp -; CHECK-NEXT:    bl sincos -; CHECK-NEXT:    vldr d19, [sp, #8] -; CHECK-NEXT:    vldr d18, [sp, #24] -; CHECK-NEXT:    vldr d17, [sp] -; CHECK-NEXT:    vldr d16, [sp, #16] -; CHECK-NEXT:    vst1.64 {d18, d19}, [r4]! -; CHECK-NEXT:    vst1.64 {d16, d17}, [r4] -; CHECK-NEXT:    add sp, #32 -; CHECK-NEXT:    pop {r4, pc} +; GNU-LABEL: test_sincos_v2f64: +; GNU:       @ %bb.0: +; GNU-NEXT:    push {r4, lr} +; GNU-NEXT:    sub sp, #32 +; GNU-NEXT:    mov r1, r3 +; GNU-NEXT:    mov r12, r2 +; GNU-NEXT:    add r2, sp, #24 +; GNU-NEXT:    add r3, sp, #16 +; GNU-NEXT:    mov r4, r0 +; GNU-NEXT:    mov r0, r12 +; GNU-NEXT:    bl sincos +; GNU-NEXT:    ldrd r0, r1, [sp, #40] +; GNU-NEXT:    add r2, sp, #8 +; GNU-NEXT:    mov r3, sp +; GNU-NEXT:    bl sincos +; GNU-NEXT:    vldr d19, [sp, #8] +; GNU-NEXT:    vldr d18, [sp, #24] +; GNU-NEXT:    vldr d17, [sp] +; GNU-NEXT:    vldr d16, [sp, #16] +; GNU-NEXT:    vst1.64 {d18, d19}, [r4]! +; GNU-NEXT:    vst1.64 {d16, d17}, [r4] +; GNU-NEXT:    add sp, #32 +; GNU-NEXT:    pop {r4, pc} +; +; GNUEABI-LABEL: test_sincos_v2f64: +; GNUEABI:       @ %bb.0: +; GNUEABI-NEXT:    .save {r4, lr} +; GNUEABI-NEXT:    push {r4, lr} +; GNUEABI-NEXT:    .pad #32 +; GNUEABI-NEXT:    sub sp, sp, #32 +; GNUEABI-NEXT:    mov r1, r3 +; GNUEABI-NEXT:    mov r12, r2 +; GNUEABI-NEXT:    add r2, sp, #24 +; GNUEABI-NEXT:    add r3, sp, #16 +; GNUEABI-NEXT:    mov r4, r0 +; GNUEABI-NEXT:    mov r0, r12 +; GNUEABI-NEXT:    bl sincos +; GNUEABI-NEXT:    ldr r0, [sp, #40] +; GNUEABI-NEXT:    add r2, sp, #8 +; GNUEABI-NEXT:    ldr r1, [sp, #44] +; GNUEABI-NEXT:    mov r3, sp +; GNUEABI-NEXT:    bl sincos +; GNUEABI-NEXT:    vldr d19, [sp, #8] +; GNUEABI-NEXT:    vldr d18, [sp, #24] +; GNUEABI-NEXT:    vldr d17, [sp] +; GNUEABI-NEXT:    vldr d16, [sp, #16] +; GNUEABI-NEXT:    vst1.64 {d18, d19}, [r4]! +; GNUEABI-NEXT:    vst1.64 {d16, d17}, [r4] +; GNUEABI-NEXT:    add sp, sp, #32 +; GNUEABI-NEXT:    pop {r4, pc} +; +; IOS-NO-STRET-LABEL: test_sincos_v2f64: +; IOS-NO-STRET:       @ %bb.0: +; IOS-NO-STRET-NEXT:    push {r4, r5, r6, r7, r8, r10, r11, lr} +; IOS-NO-STRET-NEXT:    vpush {d8, d9, d10, d11} +; IOS-NO-STRET-NEXT:    ldr r8, [sp, #64] +; IOS-NO-STRET-NEXT:    mov r7, r1 +; IOS-NO-STRET-NEXT:    mov r4, r0 +; IOS-NO-STRET-NEXT:    mov r0, r3 +; IOS-NO-STRET-NEXT:    mov r6, r3 +; IOS-NO-STRET-NEXT:    mov r10, r2 +; IOS-NO-STRET-NEXT:    mov r1, r8 +; IOS-NO-STRET-NEXT:    bl _sin +; IOS-NO-STRET-NEXT:    mov r11, r0 +; IOS-NO-STRET-NEXT:    mov r5, r1 +; IOS-NO-STRET-NEXT:    mov r0, r6 +; IOS-NO-STRET-NEXT:    mov r1, r8 +; IOS-NO-STRET-NEXT:    bl _cos +; IOS-NO-STRET-NEXT:    vmov d9, r0, r1 +; IOS-NO-STRET-NEXT:    mov r0, r7 +; IOS-NO-STRET-NEXT:    mov r1, r10 +; IOS-NO-STRET-NEXT:    vmov d11, r11, r5 +; IOS-NO-STRET-NEXT:    bl _sin +; IOS-NO-STRET-NEXT:    vmov d10, r0, r1 +; IOS-NO-STRET-NEXT:    mov r0, r7 +; IOS-NO-STRET-NEXT:    mov r1, r10 +; IOS-NO-STRET-NEXT:    bl _cos +; IOS-NO-STRET-NEXT:    vmov d8, r0, r1 +; IOS-NO-STRET-NEXT:    vst1.32 {d10, d11}, [r4]! 
+; IOS-NO-STRET-NEXT:    vst1.32 {d8, d9}, [r4] +; IOS-NO-STRET-NEXT:    vpop {d8, d9, d10, d11} +; IOS-NO-STRET-NEXT:    pop {r4, r5, r6, r7, r8, r10, r11, pc} +; +; IOS-WITH-STRET-LABEL: test_sincos_v2f64: +; IOS-WITH-STRET:       @ %bb.0: +; IOS-WITH-STRET-NEXT:    push {r4, r5, r6, lr} +; IOS-WITH-STRET-NEXT:    sub sp, sp, #32 +; IOS-WITH-STRET-NEXT:    mov r4, r2 +; IOS-WITH-STRET-NEXT:    ldr r2, [sp, #48] +; IOS-WITH-STRET-NEXT:    mov r6, r0 +; IOS-WITH-STRET-NEXT:    add r0, sp, #16 +; IOS-WITH-STRET-NEXT:    mov r5, r1 +; IOS-WITH-STRET-NEXT:    mov r1, r3 +; IOS-WITH-STRET-NEXT:    bl ___sincos_stret +; IOS-WITH-STRET-NEXT:    mov r0, sp +; IOS-WITH-STRET-NEXT:    mov r1, r5 +; IOS-WITH-STRET-NEXT:    mov r2, r4 +; IOS-WITH-STRET-NEXT:    bl ___sincos_stret +; IOS-WITH-STRET-NEXT:    vldr d17, [sp, #16] +; IOS-WITH-STRET-NEXT:    vldr d16, [sp] +; IOS-WITH-STRET-NEXT:    vldr d19, [sp, #24] +; IOS-WITH-STRET-NEXT:    vldr d18, [sp, #8] +; IOS-WITH-STRET-NEXT:    vst1.32 {d16, d17}, [r6]! +; IOS-WITH-STRET-NEXT:    vst1.32 {d18, d19}, [r6] +; IOS-WITH-STRET-NEXT:    add sp, sp, #32 +; IOS-WITH-STRET-NEXT:    pop {r4, r5, r6, pc} +; +; WATCHABI-LABEL: test_sincos_v2f64: +; WATCHABI:         .cfi_startproc +; WATCHABI-NEXT:  @ %bb.0: +; WATCHABI-NEXT:    push {r7, lr} +; WATCHABI-NEXT:    .cfi_def_cfa_offset 8 +; WATCHABI-NEXT:    .cfi_offset lr, -4 +; WATCHABI-NEXT:    .cfi_offset r7, -8 +; WATCHABI-NEXT:    vpush {d8, d9, d10, d11, d12, d13} +; WATCHABI-NEXT:    .cfi_def_cfa_offset 56 +; WATCHABI-NEXT:    .cfi_offset d13, -16 +; WATCHABI-NEXT:    .cfi_offset d12, -24 +; WATCHABI-NEXT:    .cfi_offset d11, -32 +; WATCHABI-NEXT:    .cfi_offset d10, -40 +; WATCHABI-NEXT:    .cfi_offset d9, -48 +; WATCHABI-NEXT:    .cfi_offset d8, -56 +; WATCHABI-NEXT:    sub sp, #8 +; WATCHABI-NEXT:    .cfi_def_cfa_offset 64 +; WATCHABI-NEXT:    vorr q4, q0, q0 +; WATCHABI-NEXT:    vorr d0, d9, d9 +; WATCHABI-NEXT:    bl ___sincos_stret +; WATCHABI-NEXT:    vorr d11, d0, d0 +; WATCHABI-NEXT:    vorr d0, d8, d8 +; WATCHABI-NEXT:    vorr d13, d1, d1 +; WATCHABI-NEXT:    bl ___sincos_stret +; WATCHABI-NEXT:    vorr d12, d1, d1 +; WATCHABI-NEXT:    vorr d10, d0, d0 +; WATCHABI-NEXT:    vorr q1, q6, q6 +; WATCHABI-NEXT:    vorr q0, q5, q5 +; WATCHABI-NEXT:    add sp, #8 +; WATCHABI-NEXT:    vpop {d8, d9, d10, d11, d12, d13} +; WATCHABI-NEXT:    pop {r7, pc} +; WATCHABI-NEXT:    .cfi_endproc    %result = call { <2 x double>, <2 x double> } @llvm.sincos.v2f64(<2 x double> %a)    ret { <2 x double>, <2 x double> } %result  }  define { fp128, fp128 } @test_sincos_f128(fp128 %a) { -; CHECK-LABEL: test_sincos_f128: -; CHECK:       @ %bb.0: -; CHECK-NEXT:    push {r4, r5, r7, lr} -; CHECK-NEXT:    sub sp, #40 -; CHECK-NEXT:    mov r12, r3 -; CHECK-NEXT:    ldr r3, [sp, #56] -; CHECK-NEXT:    add.w lr, sp, #8 -; CHECK-NEXT:    mov r4, r0 -; CHECK-NEXT:    add r0, sp, #24 -; CHECK-NEXT:    strd r0, lr, [sp] -; CHECK-NEXT:    mov r0, r1 -; CHECK-NEXT:    mov r1, r2 -; CHECK-NEXT:    mov r2, r12 -; CHECK-NEXT:    bl sincosl -; CHECK-NEXT:    ldrd r2, r3, [sp, #16] -; CHECK-NEXT:    ldrd r12, r1, [sp, #8] -; CHECK-NEXT:    str r3, [r4, #28] -; CHECK-NEXT:    ldrd r3, r5, [sp, #32] -; CHECK-NEXT:    ldrd lr, r0, [sp, #24] -; CHECK-NEXT:    strd r1, r2, [r4, #20] -; CHECK-NEXT:    add.w r1, r4, #8 -; CHECK-NEXT:    stm.w r1, {r3, r5, r12} -; CHECK-NEXT:    strd lr, r0, [r4] -; CHECK-NEXT:    add sp, #40 -; CHECK-NEXT:    pop {r4, r5, r7, pc} +; GNU-LABEL: test_sincos_f128: +; GNU:       @ %bb.0: +; GNU-NEXT:    push 
{r4, r5, r7, lr} +; GNU-NEXT:    sub sp, #40 +; GNU-NEXT:    mov r12, r3 +; GNU-NEXT:    ldr r3, [sp, #56] +; GNU-NEXT:    add.w lr, sp, #8 +; GNU-NEXT:    mov r4, r0 +; GNU-NEXT:    add r0, sp, #24 +; GNU-NEXT:    strd r0, lr, [sp] +; GNU-NEXT:    mov r0, r1 +; GNU-NEXT:    mov r1, r2 +; GNU-NEXT:    mov r2, r12 +; GNU-NEXT:    bl sincosl +; GNU-NEXT:    ldrd r2, r3, [sp, #16] +; GNU-NEXT:    ldrd r12, r1, [sp, #8] +; GNU-NEXT:    str r3, [r4, #28] +; GNU-NEXT:    ldrd r3, r5, [sp, #32] +; GNU-NEXT:    ldrd lr, r0, [sp, #24] +; GNU-NEXT:    strd r1, r2, [r4, #20] +; GNU-NEXT:    add.w r1, r4, #8 +; GNU-NEXT:    stm.w r1, {r3, r5, r12} +; GNU-NEXT:    strd lr, r0, [r4] +; GNU-NEXT:    add sp, #40 +; GNU-NEXT:    pop {r4, r5, r7, pc} +; +; GNUEABI-LABEL: test_sincos_f128: +; GNUEABI:       @ %bb.0: +; GNUEABI-NEXT:    .save {r4, r5, r11, lr} +; GNUEABI-NEXT:    push {r4, r5, r11, lr} +; GNUEABI-NEXT:    .pad #40 +; GNUEABI-NEXT:    sub sp, sp, #40 +; GNUEABI-NEXT:    mov r12, r3 +; GNUEABI-NEXT:    ldr r3, [sp, #56] +; GNUEABI-NEXT:    mov r4, r0 +; GNUEABI-NEXT:    add r0, sp, #24 +; GNUEABI-NEXT:    add r5, sp, #8 +; GNUEABI-NEXT:    stm sp, {r0, r5} +; GNUEABI-NEXT:    mov r0, r1 +; GNUEABI-NEXT:    mov r1, r2 +; GNUEABI-NEXT:    mov r2, r12 +; GNUEABI-NEXT:    bl sincosl +; GNUEABI-NEXT:    add r3, sp, #12 +; GNUEABI-NEXT:    ldr r12, [sp, #8] +; GNUEABI-NEXT:    ldm r3, {r1, r2, r3} +; GNUEABI-NEXT:    str r3, [r4, #28] +; GNUEABI-NEXT:    ldr r0, [sp, #32] +; GNUEABI-NEXT:    ldr lr, [sp, #24] +; GNUEABI-NEXT:    ldr r5, [sp, #28] +; GNUEABI-NEXT:    ldr r3, [sp, #36] +; GNUEABI-NEXT:    str r2, [r4, #24] +; GNUEABI-NEXT:    str r1, [r4, #20] +; GNUEABI-NEXT:    add r1, r4, #8 +; GNUEABI-NEXT:    stm r1, {r0, r3, r12} +; GNUEABI-NEXT:    str r5, [r4, #4] +; GNUEABI-NEXT:    str lr, [r4] +; GNUEABI-NEXT:    add sp, sp, #40 +; GNUEABI-NEXT:    pop {r4, r5, r11, pc} +; +; IOS-LABEL: test_sincos_f128: +; IOS:       @ %bb.0: +; IOS-NEXT:    push {r4, r5, r6, r7, r8, lr} +; IOS-NEXT:    ldr r8, [sp, #24] +; IOS-NEXT:    mov r4, r0 +; IOS-NEXT:    mov r5, r3 +; IOS-NEXT:    mov r6, r2 +; IOS-NEXT:    mov r7, r1 +; IOS-NEXT:    mov r0, r1 +; IOS-NEXT:    mov r1, r2 +; IOS-NEXT:    mov r2, r3 +; IOS-NEXT:    mov r3, r8 +; IOS-NEXT:    bl _cosl +; IOS-NEXT:    add r9, r4, #16 +; IOS-NEXT:    stm r9, {r0, r1, r2, r3} +; IOS-NEXT:    mov r0, r7 +; IOS-NEXT:    mov r1, r6 +; IOS-NEXT:    mov r2, r5 +; IOS-NEXT:    mov r3, r8 +; IOS-NEXT:    bl _sinl +; IOS-NEXT:    stm r4, {r0, r1, r2, r3} +; IOS-NEXT:    pop {r4, r5, r6, r7, r8, pc} +; +; WATCHABI-LABEL: test_sincos_f128: +; WATCHABI:         .cfi_startproc +; WATCHABI-NEXT:  @ %bb.0: +; WATCHABI-NEXT:    push.w {r4, r5, r6, r7, r8, lr} +; WATCHABI-NEXT:    .cfi_def_cfa_offset 24 +; WATCHABI-NEXT:    .cfi_offset lr, -4 +; WATCHABI-NEXT:    .cfi_offset r7, -8 +; WATCHABI-NEXT:    .cfi_offset r6, -12 +; WATCHABI-NEXT:    .cfi_offset r5, -16 +; WATCHABI-NEXT:    .cfi_offset r4, -20 +; WATCHABI-NEXT:    .cfi_offset r8, -24 +; WATCHABI-NEXT:    sub sp, #8 +; WATCHABI-NEXT:    .cfi_def_cfa_offset 32 +; WATCHABI-NEXT:    ldr.w r8, [sp, #32] +; WATCHABI-NEXT:    mov r4, r0 +; WATCHABI-NEXT:    mov r5, r3 +; WATCHABI-NEXT:    mov r6, r2 +; WATCHABI-NEXT:    mov r7, r1 +; WATCHABI-NEXT:    mov r0, r1 +; WATCHABI-NEXT:    mov r1, r2 +; WATCHABI-NEXT:    mov r2, r3 +; WATCHABI-NEXT:    mov r3, r8 +; WATCHABI-NEXT:    bl _cosl +; WATCHABI-NEXT:    add.w r9, r4, #16 +; WATCHABI-NEXT:    stm.w r9, {r0, r1, r2, r3} +; WATCHABI-NEXT:    mov r0, r7 +; 
WATCHABI-NEXT:    mov r1, r6 +; WATCHABI-NEXT:    mov r2, r5 +; WATCHABI-NEXT:    mov r3, r8 +; WATCHABI-NEXT:    bl _sinl +; WATCHABI-NEXT:    stm r4!, {r0, r1, r2, r3} +; WATCHABI-NEXT:    add sp, #8 +; WATCHABI-NEXT:    pop.w {r4, r5, r6, r7, r8, pc} +; WATCHABI-NEXT:    .cfi_endproc    %result = call { fp128, fp128 } @llvm.sincos.f16(fp128 %a)    ret { fp128, fp128 } %result  } diff --git a/llvm/test/CodeGen/BPF/bpf_trap.ll b/llvm/test/CodeGen/BPF/bpf_trap.ll new file mode 100644 index 0000000..ab8df5f --- /dev/null +++ b/llvm/test/CodeGen/BPF/bpf_trap.ll @@ -0,0 +1,32 @@ +; RUN: llc < %s | FileCheck %s +; +target triple = "bpf" + +define i32 @test(i8 %x) { +entry: +  %0 = and i8 %x, 3 +  switch i8 %0, label %default.unreachable4 [ +    i8 0, label %return +    i8 1, label %sw.bb1 +    i8 2, label %sw.bb2 +    i8 3, label %sw.bb3 +  ] + +sw.bb1:                                           ; preds = %entry +  br label %return + +sw.bb2:                                           ; preds = %entry +  br label %return + +sw.bb3:                                           ; preds = %entry +  br label %return + +default.unreachable4:                             ; preds = %entry +  unreachable + +return:                                           ; preds = %entry, %sw.bb3, %sw.bb2, %sw.bb1 +  %retval.0 = phi i32 [ 12, %sw.bb1 ], [ 43, %sw.bb2 ], [ 54, %sw.bb3 ], [ 32, %entry ] +  ret i32 %retval.0 +} + +; CHECK-NOT: __bpf_trap diff --git a/llvm/test/CodeGen/NVPTX/cmpxchg-unsupported-syncscope.err.ll b/llvm/test/CodeGen/NVPTX/cmpxchg-unsupported-syncscope.err.ll new file mode 100644 index 0000000..d3853e2 --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/cmpxchg-unsupported-syncscope.err.ll @@ -0,0 +1,11 @@ +; RUN: not llc -mcpu=sm_100a -mtriple=nvptx64 -mattr=+ptx86 %s 2>&1 | FileCheck %s + +; Test that we get a clear error message when using an unsupported syncscope. + +; CHECK: NVPTX backend does not support syncscope "agent" +; CHECK: Supported syncscopes are: singlethread, <empty string>, block, cluster, device +define i32 @cmpxchg_unsupported_syncscope_agent(ptr %addr, i32 %cmp, i32 %new) { +  %result = cmpxchg ptr %addr, i32 %cmp, i32 %new syncscope("agent") monotonic monotonic +  %value = extractvalue { i32, i1 } %result, 0 +  ret i32 %value +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei-rv64.ll new file mode 100644 index 0000000..5cb55f1 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei-rv64.ll @@ -0,0 +1,1341 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin -global-isel -verify-machineinstrs \ +; RUN:   < %s | FileCheck %s + +; The intrinsics are not supported with RV32. 
+ +declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vloxei64.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64( +    <vscale x 1 x i8> poison, +    ptr %0, +    <vscale x 1 x i64> %1, +    i64 %2) + +  ret <vscale x 1 x i8> %a +} + +declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64, +  i64); + +define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 1 x i8> %a +} + +declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vloxei64.v v10, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64( +    <vscale x 2 x i8> poison, +    ptr %0, +    <vscale x 2 x i64> %1, +    i64 %2) + +  ret <vscale x 2 x i8> %a +} + +declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64, +  i64); + +define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 2 x i8> %a +} + +declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vloxei64.v v12, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64( +    <vscale x 4 x i8> poison, +    ptr %0, +    <vscale x 4 x i64> %1, +    
i64 %2) + +  ret <vscale x 4 x i8> %a +} + +declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64, +  i64); + +define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 4 x i8> %a +} + +declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vloxei64.v v16, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64( +    <vscale x 8 x i8> poison, +    ptr %0, +    <vscale x 8 x i64> %1, +    i64 %2) + +  ret <vscale x 8 x i8> %a +} + +declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64, +  i64); + +define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 8 x i8> %a +} + +declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vloxei64.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64( +    <vscale x 1 x i16> poison, +    ptr %0, +    <vscale x 1 x i64> %1, +    i64 %2) + +  ret <vscale x 1 x i16> %a +} + +declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64, +  i64); + +define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale 
x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 1 x i16> %a +} + +declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vloxei64.v v10, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64( +    <vscale x 2 x i16> poison, +    ptr %0, +    <vscale x 2 x i64> %1, +    i64 %2) + +  ret <vscale x 2 x i16> %a +} + +declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64, +  i64); + +define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 2 x i16> %a +} + +declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vloxei64.v v12, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64( +    <vscale x 4 x i16> poison, +    ptr %0, +    <vscale x 4 x i64> %1, +    i64 %2) + +  ret <vscale x 4 x i16> %a +} + +declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64, +  i64); + +define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 4 x i16> %a +} + +declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, 
e16, m2, ta, ma +; CHECK-NEXT:    vloxei64.v v16, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64( +    <vscale x 8 x i16> poison, +    ptr %0, +    <vscale x 8 x i64> %1, +    i64 %2) + +  ret <vscale x 8 x i16> %a +} + +declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64, +  i64); + +define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 8 x i16> %a +} + +declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vloxei64.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64( +    <vscale x 1 x i32> poison, +    ptr %0, +    <vscale x 1 x i64> %1, +    i64 %2) + +  ret <vscale x 1 x i32> %a +} + +declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64, +  i64); + +define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 1 x i32> %a +} + +declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vloxei64.v v10, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64( +    <vscale x 2 x i32> poison, +    ptr %0, +    <vscale x 2 x i64> %1, +    i64 %2) + +  ret <vscale x 2 x i32> %a +} + +declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64, +  i64); + +define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, 
<vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 2 x i32> %a +} + +declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vloxei64.v v12, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64( +    <vscale x 4 x i32> poison, +    ptr %0, +    <vscale x 4 x i64> %1, +    i64 %2) + +  ret <vscale x 4 x i32> %a +} + +declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64, +  i64); + +define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 4 x i32> %a +} + +declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vloxei64.v v16, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64( +    <vscale x 8 x i32> poison, +    ptr %0, +    <vscale x 8 x i64> %1, +    i64 %2) + +  ret <vscale x 8 x i32> %a +} + +declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64, +  i64); + +define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 8 x i32> %a +} + +declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64( +  
<vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vloxei64.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64( +    <vscale x 1 x i64> poison, +    ptr %0, +    <vscale x 1 x i64> %1, +    i64 %2) + +  ret <vscale x 1 x i64> %a +} + +declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64, +  i64); + +define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 1 x i64> %a +} + +declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vloxei64.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64( +    <vscale x 2 x i64> poison, +    ptr %0, +    <vscale x 2 x i64> %1, +    i64 %2) + +  ret <vscale x 2 x i64> %a +} + +declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64, +  i64); + +define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 2 x i64> %a +} + +declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vloxei64.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64( +    <vscale x 4 x i64> poison, +    ptr %0, +    <vscale x 4 x i64> %1, +    i64 %2) + +  ret <vscale x 4 x i64> %a +} + +declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64( 
+  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64, +  i64); + +define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 4 x i64> %a +} + +declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vloxei64.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64( +    <vscale x 8 x i64> poison, +    ptr %0, +    <vscale x 8 x i64> %1, +    i64 %2) + +  ret <vscale x 8 x i64> %a +} + +declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64, +  i64); + +define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 8 x i64> %a +} + +declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vloxei64.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64( +    <vscale x 1 x half> poison, +    ptr %0, +    <vscale x 1 x i64> %1, +    i64 %2) + +  ret <vscale x 1 x half> %a +} + +declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64, +  i64); + +define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64( +    <vscale x 1 x half> %0, +    ptr %1, +  
  <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 1 x half> %a +} + +declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vloxei64.v v10, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64( +    <vscale x 2 x half> poison, +    ptr %0, +    <vscale x 2 x i64> %1, +    i64 %2) + +  ret <vscale x 2 x half> %a +} + +declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64, +  i64); + +define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 2 x half> %a +} + +declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vloxei64.v v12, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64( +    <vscale x 4 x half> poison, +    ptr %0, +    <vscale x 4 x i64> %1, +    i64 %2) + +  ret <vscale x 4 x half> %a +} + +declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64, +  i64); + +define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 4 x half> %a +} + +declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vloxei64.v v16, (a0), v8 +; 
CHECK-NEXT:    vmv.v.v v8, v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64( +    <vscale x 8 x half> poison, +    ptr %0, +    <vscale x 8 x i64> %1, +    i64 %2) + +  ret <vscale x 8 x half> %a +} + +declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64, +  i64); + +define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 8 x half> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vloxei64.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64( +    <vscale x 1 x float> poison, +    ptr %0, +    <vscale x 1 x i64> %1, +    i64 %2) + +  ret <vscale x 1 x float> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64, +  i64); + +define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 1 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vloxei64.v v10, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64( +    <vscale x 2 x float> poison, +    ptr %0, +    <vscale x 2 x i64> %1, +    i64 %2) + +  ret <vscale x 2 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64, +  i64); + +define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 
x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 2 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vloxei64.v v12, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64( +    <vscale x 4 x float> poison, +    ptr %0, +    <vscale x 4 x i64> %1, +    i64 %2) + +  ret <vscale x 4 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64, +  i64); + +define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 4 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vloxei64.v v16, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64( +    <vscale x 8 x float> poison, +    ptr %0, +    <vscale x 8 x i64> %1, +    i64 %2) + +  ret <vscale x 8 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64, +  i64); + +define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 8 x float> %a +} + +declare <vscale x 1 x 
double> @llvm.riscv.vloxei.nxv1f64.nxv1i64( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vloxei64.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64( +    <vscale x 1 x double> poison, +    ptr %0, +    <vscale x 1 x i64> %1, +    i64 %2) + +  ret <vscale x 1 x double> %a +} + +declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64, +  i64); + +define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 1 x double> %a +} + +declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vloxei64.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64( +    <vscale x 2 x double> poison, +    ptr %0, +    <vscale x 2 x i64> %1, +    i64 %2) + +  ret <vscale x 2 x double> %a +} + +declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64, +  i64); + +define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 2 x double> %a +} + +declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vloxei64.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64( +    <vscale x 4 x double> poison, +    ptr %0, +    
<vscale x 4 x i64> %1, +    i64 %2) + +  ret <vscale x 4 x double> %a +} + +declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64, +  i64); + +define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 4 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vloxei64.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64( +    <vscale x 8 x double> poison, +    ptr %0, +    <vscale x 8 x i64> %1, +    i64 %2) + +  ret <vscale x 8 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64, +  i64); + +define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 8 x double> %a +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei.ll new file mode 100644 index 0000000..fafd45b --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei.ll @@ -0,0 +1,5100 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \ +; RUN:   -global-isel -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ +; RUN:   -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vloxei32.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i8> 
@llvm.riscv.vloxei.nxv1i8.nxv1i32( +    <vscale x 1 x i8> poison, +    ptr %0, +    <vscale x 1 x i32> %1, +    iXLen %2) + +  ret <vscale x 1 x i8> %a +} + +declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i8> %a +} + +declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vloxei32.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32( +    <vscale x 2 x i8> poison, +    ptr %0, +    <vscale x 2 x i32> %1, +    iXLen %2) + +  ret <vscale x 2 x i8> %a +} + +declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i8> %a +} + +declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vloxei32.v v10, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32( +    <vscale x 4 x i8> poison, +    ptr %0, +    <vscale x 4 x i32> %1, +    iXLen %2) + +  ret <vscale x 4 x i8> %a +} + +declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    
vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i8> %a +} + +declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vloxei32.v v12, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32( +    <vscale x 8 x i8> poison, +    ptr %0, +    <vscale x 8 x i32> %1, +    iXLen %2) + +  ret <vscale x 8 x i8> %a +} + +declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i8> %a +} + +declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i32>, +  iXLen); + +define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT:    vloxei32.v v16, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32( +    <vscale x 16 x i8> poison, +    ptr %0, +    <vscale x 16 x i32> %1, +    iXLen %2) + +  ret <vscale x 16 x i8> %a +} + +declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i32>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32( +    <vscale x 16 x i8> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x i8> %a +} + +declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32(ptr %0, <vscale x 1 x i32> 
%1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vloxei32.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32( +    <vscale x 1 x i16> poison, +    ptr %0, +    <vscale x 1 x i32> %1, +    iXLen %2) + +  ret <vscale x 1 x i16> %a +} + +declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i16> %a +} + +declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vloxei32.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32( +    <vscale x 2 x i16> poison, +    ptr %0, +    <vscale x 2 x i32> %1, +    iXLen %2) + +  ret <vscale x 2 x i16> %a +} + +declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i16> %a +} + +declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vloxei32.v v10, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32( +    <vscale x 4 x i16> poison, +    ptr %0, +    <vscale x 4 x i32> %1, +    iXLen %2) + +  ret <vscale x 4 x i16> %a +} + +declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32( +  <vscale x 4 x 
i16>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i16> %a +} + +declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vloxei32.v v12, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32( +    <vscale x 8 x i16> poison, +    ptr %0, +    <vscale x 8 x i32> %1, +    iXLen %2) + +  ret <vscale x 8 x i16> %a +} + +declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i16> %a +} + +declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i32>, +  iXLen); + +define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vloxei32.v v16, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32( +    <vscale x 16 x i16> poison, +    ptr %0, +    <vscale x 16 x i32> %1, +    iXLen %2) + +  ret <vscale x 16 x i16> %a +} + +declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i32>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i16> 
@llvm.riscv.vloxei.mask.nxv16i16.nxv16i32( +    <vscale x 16 x i16> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x i16> %a +} + +declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vloxei32.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32( +    <vscale x 1 x i32> poison, +    ptr %0, +    <vscale x 1 x i32> %1, +    iXLen %2) + +  ret <vscale x 1 x i32> %a +} + +declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i32> %a +} + +declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vloxei32.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32( +    <vscale x 2 x i32> poison, +    ptr %0, +    <vscale x 2 x i32> %1, +    iXLen %2) + +  ret <vscale x 2 x i32> %a +} + +declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i32> %a +} + +declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    
vloxei32.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32( +    <vscale x 4 x i32> poison, +    ptr %0, +    <vscale x 4 x i32> %1, +    iXLen %2) + +  ret <vscale x 4 x i32> %a +} + +declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i32> %a +} + +declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vloxei32.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32( +    <vscale x 8 x i32> poison, +    ptr %0, +    <vscale x 8 x i32> %1, +    iXLen %2) + +  ret <vscale x 8 x i32> %a +} + +declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i32> %a +} + +declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i32>, +  iXLen); + +define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vloxei32.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32( +    <vscale x 16 x i32> poison, +    ptr %0, +    <vscale x 16 x i32> %1, +    iXLen %2) + +  ret <vscale x 16 x i32> %a +} + +declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i32>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; 
CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32( +    <vscale x 16 x i32> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x i32> %a +} + +declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vloxei32.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32( +    <vscale x 1 x i64> poison, +    ptr %0, +    <vscale x 1 x i32> %1, +    iXLen %2) + +  ret <vscale x 1 x i64> %a +} + +declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i64> %a +} + +declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vloxei32.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32( +    <vscale x 2 x i64> poison, +    ptr %0, +    <vscale x 2 x i32> %1, +    iXLen %2) + +  ret <vscale x 2 x i64> %a +} + +declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i64> %a +} + +declare <vscale x 4 x i64> 
@llvm.riscv.vloxei.nxv4i64.nxv4i32( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv2r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vloxei32.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32( +    <vscale x 4 x i64> poison, +    ptr %0, +    <vscale x 4 x i32> %1, +    iXLen %2) + +  ret <vscale x 4 x i64> %a +} + +declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i64> %a +} + +declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv4r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vloxei32.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32( +    <vscale x 8 x i64> poison, +    ptr %0, +    <vscale x 8 x i32> %1, +    iXLen %2) + +  ret <vscale x 8 x i64> %a +} + +declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i64> %a +} + +declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vloxei32.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; 
CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32( +    <vscale x 1 x half> poison, +    ptr %0, +    <vscale x 1 x i32> %1, +    iXLen %2) + +  ret <vscale x 1 x half> %a +} + +declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x half> %a +} + +declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vloxei32.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32( +    <vscale x 2 x half> poison, +    ptr %0, +    <vscale x 2 x i32> %1, +    iXLen %2) + +  ret <vscale x 2 x half> %a +} + +declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x half> %a +} + +declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vloxei32.v v10, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32( +    <vscale x 4 x half> poison, +    ptr %0, +    <vscale x 4 x i32> %1, +    iXLen %2) + +  ret <vscale x 4 x half> %a +} + +declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, <vscale 
x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x half> %a +} + +declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vloxei32.v v12, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32( +    <vscale x 8 x half> poison, +    ptr %0, +    <vscale x 8 x i32> %1, +    iXLen %2) + +  ret <vscale x 8 x half> %a +} + +declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x half> %a +} + +declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i32>, +  iXLen); + +define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vloxei32.v v16, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32( +    <vscale x 16 x half> poison, +    ptr %0, +    <vscale x 16 x i32> %1, +    iXLen %2) + +  ret <vscale x 16 x half> %a +} + +declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i32>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32( +    <vscale x 16 x half> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x half> 
%a +} + +declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vloxei32.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32( +    <vscale x 1 x float> poison, +    ptr %0, +    <vscale x 1 x i32> %1, +    iXLen %2) + +  ret <vscale x 1 x float> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vloxei32.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32( +    <vscale x 2 x float> poison, +    ptr %0, +    <vscale x 2 x i32> %1, +    iXLen %2) + +  ret <vscale x 2 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vloxei32.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32( +    <vscale x 4 x 
float> poison, +    ptr %0, +    <vscale x 4 x i32> %1, +    iXLen %2) + +  ret <vscale x 4 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vloxei32.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32( +    <vscale x 8 x float> poison, +    ptr %0, +    <vscale x 8 x i32> %1, +    iXLen %2) + +  ret <vscale x 8 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i32>, +  iXLen); + +define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vloxei32.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32( +    <vscale x 16 x float> poison, +    ptr %0, +    <vscale x 16 x i32> %1, +    iXLen %2) + +  ret <vscale x 16 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i32>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK:       
# %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32( +    <vscale x 16 x float> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x float> %a +} + +declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vloxei32.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32( +    <vscale x 1 x double> poison, +    ptr %0, +    <vscale x 1 x i32> %1, +    iXLen %2) + +  ret <vscale x 1 x double> %a +} + +declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x double> %a +} + +declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vloxei32.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32( +    <vscale x 2 x double> poison, +    ptr %0, +    <vscale x 2 x i32> %1, +    iXLen %2) + +  ret <vscale x 2 x double> %a +} + +declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x double> %a +} + +declare <vscale x 4 x double> 
@llvm.riscv.vloxei.nxv4f64.nxv4i32( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv2r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vloxei32.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32( +    <vscale x 4 x double> poison, +    ptr %0, +    <vscale x 4 x i32> %1, +    iXLen %2) + +  ret <vscale x 4 x double> %a +} + +declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv4r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vloxei32.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32( +    <vscale x 8 x double> poison, +    ptr %0, +    <vscale x 8 x i32> %1, +    iXLen %2) + +  ret <vscale x 8 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x double> %a +} + +declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:  
  vloxei16.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16( +    <vscale x 1 x i8> poison, +    ptr %0, +    <vscale x 1 x i16> %1, +    iXLen %2) + +  ret <vscale x 1 x i8> %a +} + +declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i8> %a +} + +declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vloxei16.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16( +    <vscale x 2 x i8> poison, +    ptr %0, +    <vscale x 2 x i16> %1, +    iXLen %2) + +  ret <vscale x 2 x i8> %a +} + +declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i8> %a +} + +declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vloxei16.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16( +    <vscale x 4 x i8> poison, +    ptr %0, +    <vscale x 4 x i16> %1, +    iXLen %2) + +  ret <vscale x 4 x i8> %a +} + +declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { 
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i8> %a +} + +declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vloxei16.v v10, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16( +    <vscale x 8 x i8> poison, +    ptr %0, +    <vscale x 8 x i16> %1, +    iXLen %2) + +  ret <vscale x 8 x i8> %a +} + +declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i8> %a +} + +declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i16>, +  iXLen); + +define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT:    vloxei16.v v12, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16( +    <vscale x 16 x i8> poison, +    ptr %0, +    <vscale x 16 x i16> %1, +    iXLen %2) + +  ret <vscale x 16 x i8> %a +} + +declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i16>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16( +    <vscale x 16 x i8> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x i8> %a +} + +declare <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16( +  <vscale x 32 x i8>, +  ptr, +  <vscale x 32 x 
i16>, +  iXLen); + +define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT:    vloxei16.v v16, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16( +    <vscale x 32 x i8> poison, +    ptr %0, +    <vscale x 32 x i16> %1, +    iXLen %2) + +  ret <vscale x 32 x i8> %a +} + +declare <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16( +  <vscale x 32 x i8>, +  ptr, +  <vscale x 32 x i16>, +  <vscale x 32 x i1>, +  iXLen, +  iXLen); + +define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16( +    <vscale x 32 x i8> %0, +    ptr %1, +    <vscale x 32 x i16> %2, +    <vscale x 32 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 32 x i8> %a +} + +declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16( +    <vscale x 1 x i16> poison, +    ptr %0, +    <vscale x 1 x i16> %1, +    iXLen %2) + +  ret <vscale x 1 x i16> %a +} + +declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i16> %a +} + +declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16( +    <vscale x 2 x i16> poison, +    ptr %0, +    <vscale x 2 x i16> %1, +    iXLen %2) + +  ret <vscale x 2 x i16> %a +} + +declare <vscale x 2 x 
i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i16> %a +} + +declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16( +    <vscale x 4 x i16> poison, +    ptr %0, +    <vscale x 4 x i16> %1, +    iXLen %2) + +  ret <vscale x 4 x i16> %a +} + +declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i16> %a +} + +declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16( +    <vscale x 8 x i16> poison, +    ptr %0, +    <vscale x 8 x i16> %1, +    iXLen %2) + +  ret <vscale x 8 x i16> %a +} + +declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16( +    
<vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i16> %a +} + +declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i16>, +  iXLen); + +define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16( +    <vscale x 16 x i16> poison, +    ptr %0, +    <vscale x 16 x i16> %1, +    iXLen %2) + +  ret <vscale x 16 x i16> %a +} + +declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i16>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16( +    <vscale x 16 x i16> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x i16> %a +} + +declare <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16( +  <vscale x 32 x i16>, +  ptr, +  <vscale x 32 x i16>, +  iXLen); + +define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16( +    <vscale x 32 x i16> poison, +    ptr %0, +    <vscale x 32 x i16> %1, +    iXLen %2) + +  ret <vscale x 32 x i16> %a +} + +declare <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16( +  <vscale x 32 x i16>, +  ptr, +  <vscale x 32 x i16>, +  <vscale x 32 x i1>, +  iXLen, +  iXLen); + +define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16( +    <vscale x 32 x i16> %0, +    ptr %1, +    <vscale x 32 x i16> %2, +    <vscale x 32 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 32 x i16> %a +} + +declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, 
mf2, ta, ma +; CHECK-NEXT:    vloxei16.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16( +    <vscale x 1 x i32> poison, +    ptr %0, +    <vscale x 1 x i16> %1, +    iXLen %2) + +  ret <vscale x 1 x i32> %a +} + +declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i32> %a +} + +declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vloxei16.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16( +    <vscale x 2 x i32> poison, +    ptr %0, +    <vscale x 2 x i16> %1, +    iXLen %2) + +  ret <vscale x 2 x i32> %a +} + +declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i32> %a +} + +declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16( +    <vscale x 4 x i32> poison, +    ptr %0, +    <vscale x 4 x i16> %1, +    iXLen %2) + +  ret <vscale x 4 x i32> %a +} + +declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i32> 
@intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i32> %a +} + +declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv2r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16( +    <vscale x 8 x i32> poison, +    ptr %0, +    <vscale x 8 x i16> %1, +    iXLen %2) + +  ret <vscale x 8 x i32> %a +} + +declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i32> %a +} + +declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i16>, +  iXLen); + +define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv4r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16( +    <vscale x 16 x i32> poison, +    ptr %0, +    <vscale x 16 x i16> %1, +    iXLen %2) + +  ret <vscale x 16 x i32> %a +} + +declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i16>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i32> 
@llvm.riscv.vloxei.mask.nxv16i32.nxv16i16( +    <vscale x 16 x i32> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x i32> %a +} + +declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vloxei16.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16( +    <vscale x 1 x i64> poison, +    ptr %0, +    <vscale x 1 x i16> %1, +    iXLen %2) + +  ret <vscale x 1 x i64> %a +} + +declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i64> %a +} + +declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16( +    <vscale x 2 x i64> poison, +    ptr %0, +    <vscale x 2 x i16> %1, +    iXLen %2) + +  ret <vscale x 2 x i64> %a +} + +declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i64> %a +} + +declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16( +    <vscale x 4 x i64> poison, +    ptr %0, +    <vscale x 4 x i16> %1, +    iXLen %2) + +  ret <vscale x 4 x i64> %a +} + +declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i64> %a +} + +declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv2r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16( +    <vscale x 8 x i64> poison, +    ptr %0, +    <vscale x 8 x i16> %1, +    iXLen %2) + +  ret <vscale x 8 x i64> %a +} + +declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i64> %a +} + +declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16( +    <vscale x 1 x half> poison, +    ptr %0, +    <vscale x 1 x i16> %1, +    iXLen %2) + +  ret <vscale x 1 x half> %a +} + +declare <vscale x 1 x half> 
@llvm.riscv.vloxei.mask.nxv1f16.nxv1i16( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x half> %a +} + +declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16( +    <vscale x 2 x half> poison, +    ptr %0, +    <vscale x 2 x i16> %1, +    iXLen %2) + +  ret <vscale x 2 x half> %a +} + +declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x half> %a +} + +declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16( +    <vscale x 4 x half> poison, +    ptr %0, +    <vscale x 4 x i16> %1, +    iXLen %2) + +  ret <vscale x 4 x half> %a +} + +declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x half> 
@llvm.riscv.vloxei.mask.nxv4f16.nxv4i16( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x half> %a +} + +declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16( +    <vscale x 8 x half> poison, +    ptr %0, +    <vscale x 8 x i16> %1, +    iXLen %2) + +  ret <vscale x 8 x half> %a +} + +declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x half> %a +} + +declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i16>, +  iXLen); + +define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16( +    <vscale x 16 x half> poison, +    ptr %0, +    <vscale x 16 x i16> %1, +    iXLen %2) + +  ret <vscale x 16 x half> %a +} + +declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i16>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16( +    <vscale x 16 x half> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x half> %a +} + +declare <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16( +  <vscale x 32 x half>, +  ptr, +  <vscale x 32 x i16>, +  iXLen); + +define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK:       # %bb.0: # 
%entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16( +    <vscale x 32 x half> poison, +    ptr %0, +    <vscale x 32 x i16> %1, +    iXLen %2) + +  ret <vscale x 32 x half> %a +} + +declare <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16( +  <vscale x 32 x half>, +  ptr, +  <vscale x 32 x i16>, +  <vscale x 32 x i1>, +  iXLen, +  iXLen); + +define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16( +    <vscale x 32 x half> %0, +    ptr %1, +    <vscale x 32 x i16> %2, +    <vscale x 32 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 32 x half> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vloxei16.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16( +    <vscale x 1 x float> poison, +    ptr %0, +    <vscale x 1 x i16> %1, +    iXLen %2) + +  ret <vscale x 1 x float> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vloxei16.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16( +    <vscale x 2 x float> poison, +    ptr %0, +    <vscale x 2 x i16> %1, +    iXLen %2) + +  ret <vscale x 2 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen, 
+  iXLen); + +define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16( +    <vscale x 4 x float> poison, +    ptr %0, +    <vscale x 4 x i16> %1, +    iXLen %2) + +  ret <vscale x 4 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv2r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16( +    <vscale x 8 x float> poison, +    ptr %0, +    <vscale x 8 x i16> %1, +    iXLen %2) + +  ret <vscale x 8 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t +; 
CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i16>, +  iXLen); + +define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv4r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16( +    <vscale x 16 x float> poison, +    ptr %0, +    <vscale x 16 x i16> %1, +    iXLen %2) + +  ret <vscale x 16 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i16>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16( +    <vscale x 16 x float> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x float> %a +} + +declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vloxei16.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16( +    <vscale x 1 x double> poison, +    ptr %0, +    <vscale x 1 x i16> %1, +    iXLen %2) + +  ret <vscale x 1 x double> %a +} + +declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x double> %a +} + +declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + 
+define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16( +    <vscale x 2 x double> poison, +    ptr %0, +    <vscale x 2 x i16> %1, +    iXLen %2) + +  ret <vscale x 2 x double> %a +} + +declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x double> %a +} + +declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vloxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16( +    <vscale x 4 x double> poison, +    ptr %0, +    <vscale x 4 x i16> %1, +    iXLen %2) + +  ret <vscale x 4 x double> %a +} + +declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv2r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    
vloxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16( +    <vscale x 8 x double> poison, +    ptr %0, +    <vscale x 8 x i16> %1, +    iXLen %2) + +  ret <vscale x 8 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x double> %a +} + +declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8( +    <vscale x 1 x i8> poison, +    ptr %0, +    <vscale x 1 x i8> %1, +    iXLen %2) + +  ret <vscale x 1 x i8> %a +} + +declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i8> %a +} + +declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8( +    <vscale x 2 x i8> poison, +    ptr %0, +    <vscale x 2 x i8> %1, +    iXLen %2) + +  ret <vscale x 2 x i8> %a +} + +declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK:       # 
%bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i8> %a +} + +declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8( +    <vscale x 4 x i8> poison, +    ptr %0, +    <vscale x 4 x i8> %1, +    iXLen %2) + +  ret <vscale x 4 x i8> %a +} + +declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i8> %a +} + +declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8( +    <vscale x 8 x i8> poison, +    ptr %0, +    <vscale x 8 x i8> %1, +    iXLen %2) + +  ret <vscale x 8 x i8> %a +} + +declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i8> %a +} + +declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i8>, +  iXLen); + +define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK:       # %bb.0: # 
%entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8( +    <vscale x 16 x i8> poison, +    ptr %0, +    <vscale x 16 x i8> %1, +    iXLen %2) + +  ret <vscale x 16 x i8> %a +} + +declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i8>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8( +    <vscale x 16 x i8> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x i8> %a +} + +declare <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8( +  <vscale x 32 x i8>, +  ptr, +  <vscale x 32 x i8>, +  iXLen); + +define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8( +    <vscale x 32 x i8> poison, +    ptr %0, +    <vscale x 32 x i8> %1, +    iXLen %2) + +  ret <vscale x 32 x i8> %a +} + +declare <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8( +  <vscale x 32 x i8>, +  ptr, +  <vscale x 32 x i8>, +  <vscale x 32 x i1>, +  iXLen, +  iXLen); + +define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8( +    <vscale x 32 x i8> %0, +    ptr %1, +    <vscale x 32 x i8> %2, +    <vscale x 32 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 32 x i8> %a +} + +declare <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8( +  <vscale x 64 x i8>, +  ptr, +  <vscale x 64 x i8>, +  iXLen); + +define <vscale x 64 x i8> @intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8(ptr %0, <vscale x 64 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8( +    <vscale x 64 x i8> poison, +    ptr %0, +    <vscale x 64 x i8> %1, +    iXLen %2) + +  ret <vscale x 64 x i8> %a +} + +declare <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8( +  <vscale x 64 x i8>, +  ptr, +  <vscale x 64 x i8>, +  <vscale x 64 x i1>, +  iXLen, +  iXLen); + +define <vscale x 64 x i8> @intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, <vscale 
x 64 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8( +    <vscale x 64 x i8> %0, +    ptr %1, +    <vscale x 64 x i8> %2, +    <vscale x 64 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 64 x i8> %a +} + +declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vloxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8( +    <vscale x 1 x i16> poison, +    ptr %0, +    <vscale x 1 x i8> %1, +    iXLen %2) + +  ret <vscale x 1 x i16> %a +} + +declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i16> %a +} + +declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vloxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8( +    <vscale x 2 x i16> poison, +    ptr %0, +    <vscale x 2 x i8> %1, +    iXLen %2) + +  ret <vscale x 2 x i16> %a +} + +declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i16> %a +} + +declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8( +  <vscale x 4 x i16>, +  ptr, +  
<vscale x 4 x i8>, +  iXLen); + +define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vloxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8( +    <vscale x 4 x i16> poison, +    ptr %0, +    <vscale x 4 x i8> %1, +    iXLen %2) + +  ret <vscale x 4 x i16> %a +} + +declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i16> %a +} + +declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8( +    <vscale x 8 x i16> poison, +    ptr %0, +    <vscale x 8 x i8> %1, +    iXLen %2) + +  ret <vscale x 8 x i16> %a +} + +declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i16> %a +} + +declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i8>, +  iXLen); + +define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv2r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8( +  
  <vscale x 16 x i16> poison, +    ptr %0, +    <vscale x 16 x i8> %1, +    iXLen %2) + +  ret <vscale x 16 x i16> %a +} + +declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i8>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8( +    <vscale x 16 x i16> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x i16> %a +} + +declare <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8( +  <vscale x 32 x i16>, +  ptr, +  <vscale x 32 x i8>, +  iXLen); + +define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv4r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8( +    <vscale x 32 x i16> poison, +    ptr %0, +    <vscale x 32 x i8> %1, +    iXLen %2) + +  ret <vscale x 32 x i16> %a +} + +declare <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8( +  <vscale x 32 x i16>, +  ptr, +  <vscale x 32 x i8>, +  <vscale x 32 x i1>, +  iXLen, +  iXLen); + +define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8( +    <vscale x 32 x i16> %0, +    ptr %1, +    <vscale x 32 x i8> %2, +    <vscale x 32 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 32 x i16> %a +} + +declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vloxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8( +    <vscale x 1 x i32> poison, +    ptr %0, +    <vscale x 1 x i8> %1, +    iXLen %2) + +  ret <vscale x 1 x i32> %a +} + +declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; 
CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i32> %a +} + +declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vloxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8( +    <vscale x 2 x i32> poison, +    ptr %0, +    <vscale x 2 x i8> %1, +    iXLen %2) + +  ret <vscale x 2 x i32> %a +} + +declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i32> %a +} + +declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8( +    <vscale x 4 x i32> poison, +    ptr %0, +    <vscale x 4 x i8> %1, +    iXLen %2) + +  ret <vscale x 4 x i32> %a +} + +declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i32> %a +} + +declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8( +  <vscale x 8 x i32>, +  
ptr, +  <vscale x 8 x i8>, +  iXLen); + +define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8( +    <vscale x 8 x i32> poison, +    ptr %0, +    <vscale x 8 x i8> %1, +    iXLen %2) + +  ret <vscale x 8 x i32> %a +} + +declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i32> %a +} + +declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i8>, +  iXLen); + +define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv2r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8( +    <vscale x 16 x i32> poison, +    ptr %0, +    <vscale x 16 x i8> %1, +    iXLen %2) + +  ret <vscale x 16 x i32> %a +} + +declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i8>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8( +    <vscale x 16 x i32> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x i32> %a +} + +declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vloxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i64> 
@llvm.riscv.vloxei.nxv1i64.nxv1i8( +    <vscale x 1 x i64> poison, +    ptr %0, +    <vscale x 1 x i8> %1, +    iXLen %2) + +  ret <vscale x 1 x i64> %a +} + +declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i64> %a +} + +declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8( +    <vscale x 2 x i64> poison, +    ptr %0, +    <vscale x 2 x i8> %1, +    iXLen %2) + +  ret <vscale x 2 x i64> %a +} + +declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i64> %a +} + +declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8( +    <vscale x 4 x i64> poison, +    ptr %0, +    <vscale x 4 x i8> %1, +    iXLen %2) + +  ret <vscale x 4 x i64> %a +} + +declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) 
nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i64> %a +} + +declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8( +    <vscale x 8 x i64> poison, +    ptr %0, +    <vscale x 8 x i8> %1, +    iXLen %2) + +  ret <vscale x 8 x i64> %a +} + +declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i64> %a +} + +declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vloxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8( +    <vscale x 1 x half> poison, +    ptr %0, +    <vscale x 1 x i8> %1, +    iXLen %2) + +  ret <vscale x 1 x half> %a +} + +declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x half> %a +} + +declare <vscale x 2 x half> 
@llvm.riscv.vloxei.nxv2f16.nxv2i8( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vloxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8( +    <vscale x 2 x half> poison, +    ptr %0, +    <vscale x 2 x i8> %1, +    iXLen %2) + +  ret <vscale x 2 x half> %a +} + +declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x half> %a +} + +declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vloxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8( +    <vscale x 4 x half> poison, +    ptr %0, +    <vscale x 4 x i8> %1, +    iXLen %2) + +  ret <vscale x 4 x half> %a +} + +declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x half> %a +} + +declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x half> 
@llvm.riscv.vloxei.nxv8f16.nxv8i8( +    <vscale x 8 x half> poison, +    ptr %0, +    <vscale x 8 x i8> %1, +    iXLen %2) + +  ret <vscale x 8 x half> %a +} + +declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x half> %a +} + +declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i8>, +  iXLen); + +define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv2r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8( +    <vscale x 16 x half> poison, +    ptr %0, +    <vscale x 16 x i8> %1, +    iXLen %2) + +  ret <vscale x 16 x half> %a +} + +declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i8>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8( +    <vscale x 16 x half> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x half> %a +} + +declare <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8( +  <vscale x 32 x half>, +  ptr, +  <vscale x 32 x i8>, +  iXLen); + +define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv4r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8( +    <vscale x 32 x half> poison, +    ptr %0, +    <vscale x 32 x i8> %1, +    iXLen %2) + +  ret <vscale x 32 x half> %a +} + +declare <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8( +  <vscale x 32 x half>, +  ptr, +  <vscale x 32 x i8>, +  <vscale x 32 x i1>, +  iXLen, +  iXLen); + +define <vscale x 32 x half> 
@intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8( +    <vscale x 32 x half> %0, +    ptr %1, +    <vscale x 32 x i8> %2, +    <vscale x 32 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 32 x half> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vloxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8( +    <vscale x 1 x float> poison, +    ptr %0, +    <vscale x 1 x i8> %1, +    iXLen %2) + +  ret <vscale x 1 x float> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vloxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8( +    <vscale x 2 x float> poison, +    ptr %0, +    <vscale x 2 x i8> %1, +    iXLen %2) + +  ret <vscale x 2 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    
<vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8( +    <vscale x 4 x float> poison, +    ptr %0, +    <vscale x 4 x i8> %1, +    iXLen %2) + +  ret <vscale x 4 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8( +    <vscale x 8 x float> poison, +    ptr %0, +    <vscale x 8 x i8> %1, +    iXLen %2) + +  ret <vscale x 8 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i8>, +  iXLen); + +define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK:       # 
%bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv2r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8( +    <vscale x 16 x float> poison, +    ptr %0, +    <vscale x 16 x i8> %1, +    iXLen %2) + +  ret <vscale x 16 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i8>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8( +    <vscale x 16 x float> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x float> %a +} + +declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vloxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8( +    <vscale x 1 x double> poison, +    ptr %0, +    <vscale x 1 x i8> %1, +    iXLen %2) + +  ret <vscale x 1 x double> %a +} + +declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x double> %a +} + +declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8( +    <vscale x 2 x double> poison, +    ptr %0, +    <vscale x 2 x i8> %1, +    iXLen %2) + +  ret <vscale x 2 x double> %a +} + +declare <vscale x 
2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x double> %a +} + +declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8( +    <vscale x 4 x double> poison, +    ptr %0, +    <vscale x 4 x i8> %1, +    iXLen %2) + +  ret <vscale x 4 x double> %a +} + +declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vloxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8( +    <vscale x 8 x double> poison, +    ptr %0, +    <vscale x 8 x i8> %1, +    iXLen %2) + +  ret <vscale x 8 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8: +; CHECK:      
 # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x double> %a +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei-rv64.ll new file mode 100644 index 0000000..916af25 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei-rv64.ll @@ -0,0 +1,1341 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin -global-isel -verify-machineinstrs \ +; RUN:   < %s | FileCheck %s + +; The intrinsics are not supported with RV32. + +declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vluxei64.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64( +    <vscale x 1 x i8> poison, +    ptr %0, +    <vscale x 1 x i64> %1, +    i64 %2) + +  ret <vscale x 1 x i8> %a +} + +declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64, +  i64); + +define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 1 x i8> %a +} + +declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vluxei64.v v10, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64( +    <vscale x 2 x i8> poison, +    ptr %0, +    <vscale x 2 x i64> %1, +    i64 %2) + +  ret <vscale x 2 x i8> %a +} + +declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64, +  i64); + +define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v10, 
v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 2 x i8> %a +} + +declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vluxei64.v v12, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64( +    <vscale x 4 x i8> poison, +    ptr %0, +    <vscale x 4 x i64> %1, +    i64 %2) + +  ret <vscale x 4 x i8> %a +} + +declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64, +  i64); + +define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 4 x i8> %a +} + +declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vluxei64.v v16, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64( +    <vscale x 8 x i8> poison, +    ptr %0, +    <vscale x 8 x i64> %1, +    i64 %2) + +  ret <vscale x 8 x i8> %a +} + +declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64, +  i64); + +define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 8 x i8> %a +} + +declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, 
mf4, ta, ma +; CHECK-NEXT:    vluxei64.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64( +    <vscale x 1 x i16> poison, +    ptr %0, +    <vscale x 1 x i64> %1, +    i64 %2) + +  ret <vscale x 1 x i16> %a +} + +declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64, +  i64); + +define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 1 x i16> %a +} + +declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vluxei64.v v10, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64( +    <vscale x 2 x i16> poison, +    ptr %0, +    <vscale x 2 x i64> %1, +    i64 %2) + +  ret <vscale x 2 x i16> %a +} + +declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64, +  i64); + +define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 2 x i16> %a +} + +declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vluxei64.v v12, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64( +    <vscale x 4 x i16> poison, +    ptr %0, +    <vscale x 4 x i64> %1, +    i64 %2) + +  ret <vscale x 4 x i16> %a +} + +declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64, +  i64); + +define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale 
x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 4 x i16> %a +} + +declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vluxei64.v v16, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64( +    <vscale x 8 x i16> poison, +    ptr %0, +    <vscale x 8 x i64> %1, +    i64 %2) + +  ret <vscale x 8 x i16> %a +} + +declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64, +  i64); + +define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 8 x i16> %a +} + +declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vluxei64.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64( +    <vscale x 1 x i32> poison, +    ptr %0, +    <vscale x 1 x i64> %1, +    i64 %2) + +  ret <vscale x 1 x i32> %a +} + +declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64, +  i64); + +define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 1 x i32> %a +} + +declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64( +  <vscale x 2 
x i32>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vluxei64.v v10, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64( +    <vscale x 2 x i32> poison, +    ptr %0, +    <vscale x 2 x i64> %1, +    i64 %2) + +  ret <vscale x 2 x i32> %a +} + +declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64, +  i64); + +define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 2 x i32> %a +} + +declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vluxei64.v v12, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64( +    <vscale x 4 x i32> poison, +    ptr %0, +    <vscale x 4 x i64> %1, +    i64 %2) + +  ret <vscale x 4 x i32> %a +} + +declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64, +  i64); + +define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 4 x i32> %a +} + +declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vluxei64.v v16, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64( +    <vscale x 8 x i32> poison, +    ptr %0, +    <vscale x 8 x i64> %1, +    i64 %2) + +  ret 
<vscale x 8 x i32> %a +} + +declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64, +  i64); + +define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 8 x i32> %a +} + +declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vluxei64.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64( +    <vscale x 1 x i64> poison, +    ptr %0, +    <vscale x 1 x i64> %1, +    i64 %2) + +  ret <vscale x 1 x i64> %a +} + +declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64, +  i64); + +define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 1 x i64> %a +} + +declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vluxei64.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64( +    <vscale x 2 x i64> poison, +    ptr %0, +    <vscale x 2 x i64> %1, +    i64 %2) + +  ret <vscale x 2 x i64> %a +} + +declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64, +  i64); + +define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i64> 
@llvm.riscv.vluxei.mask.nxv2i64.nxv2i64( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 2 x i64> %a +} + +declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vluxei64.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64( +    <vscale x 4 x i64> poison, +    ptr %0, +    <vscale x 4 x i64> %1, +    i64 %2) + +  ret <vscale x 4 x i64> %a +} + +declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64, +  i64); + +define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 4 x i64> %a +} + +declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vluxei64.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64( +    <vscale x 8 x i64> poison, +    ptr %0, +    <vscale x 8 x i64> %1, +    i64 %2) + +  ret <vscale x 8 x i64> %a +} + +declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64, +  i64); + +define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 8 x i64> %a +} + +declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vluxei64.v v9, (a0), v8 +; CHECK-NEXT:    
vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64( +    <vscale x 1 x half> poison, +    ptr %0, +    <vscale x 1 x i64> %1, +    i64 %2) + +  ret <vscale x 1 x half> %a +} + +declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64, +  i64); + +define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 1 x half> %a +} + +declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vluxei64.v v10, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64( +    <vscale x 2 x half> poison, +    ptr %0, +    <vscale x 2 x i64> %1, +    i64 %2) + +  ret <vscale x 2 x half> %a +} + +declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64, +  i64); + +define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 2 x half> %a +} + +declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vluxei64.v v12, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64( +    <vscale x 4 x half> poison, +    ptr %0, +    <vscale x 4 x i64> %1, +    i64 %2) + +  ret <vscale x 4 x half> %a +} + +declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64, +  i64); + +define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, 
i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 4 x half> %a +} + +declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vluxei64.v v16, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64( +    <vscale x 8 x half> poison, +    ptr %0, +    <vscale x 8 x i64> %1, +    i64 %2) + +  ret <vscale x 8 x half> %a +} + +declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64, +  i64); + +define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 8 x half> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vluxei64.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64( +    <vscale x 1 x float> poison, +    ptr %0, +    <vscale x 1 x i64> %1, +    i64 %2) + +  ret <vscale x 1 x float> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64, +  i64); + +define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 1 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64( +  
<vscale x 2 x float>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vluxei64.v v10, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64( +    <vscale x 2 x float> poison, +    ptr %0, +    <vscale x 2 x i64> %1, +    i64 %2) + +  ret <vscale x 2 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64, +  i64); + +define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 2 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vluxei64.v v12, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64( +    <vscale x 4 x float> poison, +    ptr %0, +    <vscale x 4 x i64> %1, +    i64 %2) + +  ret <vscale x 4 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64, +  i64); + +define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 4 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vluxei64.v v16, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64( +    <vscale x 8 x float> 
poison, +    ptr %0, +    <vscale x 8 x i64> %1, +    i64 %2) + +  ret <vscale x 8 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64, +  i64); + +define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 8 x float> %a +} + +declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vluxei64.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64( +    <vscale x 1 x double> poison, +    ptr %0, +    <vscale x 1 x i64> %1, +    i64 %2) + +  ret <vscale x 1 x double> %a +} + +declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64, +  i64); + +define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 1 x double> %a +} + +declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vluxei64.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64( +    <vscale x 2 x double> poison, +    ptr %0, +    <vscale x 2 x i64> %1, +    i64 %2) + +  ret <vscale x 2 x double> %a +} + +declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64, +  i64); + +define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, 
m2, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 2 x double> %a +} + +declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vluxei64.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64( +    <vscale x 4 x double> poison, +    ptr %0, +    <vscale x 4 x i64> %1, +    i64 %2) + +  ret <vscale x 4 x double> %a +} + +declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64, +  i64); + +define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 4 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vluxei64.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64( +    <vscale x 8 x double> poison, +    ptr %0, +    <vscale x 8 x i64> %1, +    i64 %2) + +  ret <vscale x 8 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64, +  i64); + +define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4, i64 1) + +  ret <vscale x 8 x double> %a +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei.ll new file mode 100644 index 0000000..8dd32a1 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei.ll @@ -0,0 +1,5100 @@ +; NOTE: 
Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \ +; RUN:   -global-isel -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ +; RUN:   -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vluxei32.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32( +    <vscale x 1 x i8> poison, +    ptr %0, +    <vscale x 1 x i32> %1, +    iXLen %2) + +  ret <vscale x 1 x i8> %a +} + +declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i8> %a +} + +declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vluxei32.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32( +    <vscale x 2 x i8> poison, +    ptr %0, +    <vscale x 2 x i32> %1, +    iXLen %2) + +  ret <vscale x 2 x i8> %a +} + +declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i8> %a +} + +declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(ptr %0, <vscale x 
4 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vluxei32.v v10, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32( +    <vscale x 4 x i8> poison, +    ptr %0, +    <vscale x 4 x i32> %1, +    iXLen %2) + +  ret <vscale x 4 x i8> %a +} + +declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i8> %a +} + +declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vluxei32.v v12, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32( +    <vscale x 8 x i8> poison, +    ptr %0, +    <vscale x 8 x i32> %1, +    iXLen %2) + +  ret <vscale x 8 x i8> %a +} + +declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i8> %a +} + +declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i32>, +  iXLen); + +define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT:    vluxei32.v v16, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32( +    <vscale x 16 x i8> poison, +    ptr %0, +    <vscale x 16 x i32> %1, +    iXLen %2) + +  ret <vscale x 16 x i8> %a +} + +declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 
x i32>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32( +    <vscale x 16 x i8> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x i8> %a +} + +declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vluxei32.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32( +    <vscale x 1 x i16> poison, +    ptr %0, +    <vscale x 1 x i32> %1, +    iXLen %2) + +  ret <vscale x 1 x i16> %a +} + +declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i16> %a +} + +declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vluxei32.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32( +    <vscale x 2 x i16> poison, +    ptr %0, +    <vscale x 2 x i32> %1, +    iXLen %2) + +  ret <vscale x 2 x i16> %a +} + +declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32( +    <vscale x 2 x 
i16> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i16> %a +} + +declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vluxei32.v v10, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32( +    <vscale x 4 x i16> poison, +    ptr %0, +    <vscale x 4 x i32> %1, +    iXLen %2) + +  ret <vscale x 4 x i16> %a +} + +declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i16> %a +} + +declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vluxei32.v v12, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32( +    <vscale x 8 x i16> poison, +    ptr %0, +    <vscale x 8 x i32> %1, +    iXLen %2) + +  ret <vscale x 8 x i16> %a +} + +declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i16> %a +} + +declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i32>, +  iXLen); + +define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; 
CHECK-NEXT:    vluxei32.v v16, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32( +    <vscale x 16 x i16> poison, +    ptr %0, +    <vscale x 16 x i32> %1, +    iXLen %2) + +  ret <vscale x 16 x i16> %a +} + +declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i32>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32( +    <vscale x 16 x i16> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x i16> %a +} + +declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vluxei32.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32( +    <vscale x 1 x i32> poison, +    ptr %0, +    <vscale x 1 x i32> %1, +    iXLen %2) + +  ret <vscale x 1 x i32> %a +} + +declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i32> %a +} + +declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vluxei32.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32( +    <vscale x 2 x i32> poison, +    ptr %0, +    <vscale x 2 x i32> %1, +    iXLen %2) + +  ret <vscale x 2 x i32> %a +} + +declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, 
<vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i32> %a +} + +declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vluxei32.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32( +    <vscale x 4 x i32> poison, +    ptr %0, +    <vscale x 4 x i32> %1, +    iXLen %2) + +  ret <vscale x 4 x i32> %a +} + +declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i32> %a +} + +declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vluxei32.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32( +    <vscale x 8 x i32> poison, +    ptr %0, +    <vscale x 8 x i32> %1, +    iXLen %2) + +  ret <vscale x 8 x i32> %a +} + +declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i32> %a +} + +declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i32>, +  
iXLen); + +define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vluxei32.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32( +    <vscale x 16 x i32> poison, +    ptr %0, +    <vscale x 16 x i32> %1, +    iXLen %2) + +  ret <vscale x 16 x i32> %a +} + +declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i32>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32( +    <vscale x 16 x i32> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x i32> %a +} + +declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vluxei32.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32( +    <vscale x 1 x i64> poison, +    ptr %0, +    <vscale x 1 x i32> %1, +    iXLen %2) + +  ret <vscale x 1 x i64> %a +} + +declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i64> %a +} + +declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vluxei32.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32( +    <vscale x 2 x i64> poison, +    ptr %0, +    
<vscale x 2 x i32> %1, +    iXLen %2) + +  ret <vscale x 2 x i64> %a +} + +declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i64> %a +} + +declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv2r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vluxei32.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32( +    <vscale x 4 x i64> poison, +    ptr %0, +    <vscale x 4 x i32> %1, +    iXLen %2) + +  ret <vscale x 4 x i64> %a +} + +declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i64> %a +} + +declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv4r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vluxei32.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32( +    <vscale x 8 x i64> poison, +    ptr %0, +    <vscale x 8 x i32> %1, +    iXLen %2) + +  ret <vscale x 8 x i64> %a +} + +declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i64> %a +} + +declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vluxei32.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32( +    <vscale x 1 x half> poison, +    ptr %0, +    <vscale x 1 x i32> %1, +    iXLen %2) + +  ret <vscale x 1 x half> %a +} + +declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x half> %a +} + +declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vluxei32.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32( +    <vscale x 2 x half> poison, +    ptr %0, +    <vscale x 2 x i32> %1, +    iXLen %2) + +  ret <vscale x 2 x half> %a +} + +declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x half> %a +} + +declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32( +  <vscale x 4 x half>, 
+  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vluxei32.v v10, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32( +    <vscale x 4 x half> poison, +    ptr %0, +    <vscale x 4 x i32> %1, +    iXLen %2) + +  ret <vscale x 4 x half> %a +} + +declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x half> %a +} + +declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vluxei32.v v12, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32( +    <vscale x 8 x half> poison, +    ptr %0, +    <vscale x 8 x i32> %1, +    iXLen %2) + +  ret <vscale x 8 x half> %a +} + +declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x half> %a +} + +declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i32>, +  iXLen); + +define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vluxei32.v v16, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32( +    <vscale x 16 x 
half> poison, +    ptr %0, +    <vscale x 16 x i32> %1, +    iXLen %2) + +  ret <vscale x 16 x half> %a +} + +declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i32>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32( +    <vscale x 16 x half> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x half> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vluxei32.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32( +    <vscale x 1 x float> poison, +    ptr %0, +    <vscale x 1 x i32> %1, +    iXLen %2) + +  ret <vscale x 1 x float> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vluxei32.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32( +    <vscale x 2 x float> poison, +    ptr %0, +    <vscale x 2 x i32> %1, +    iXLen %2) + +  ret <vscale x 2 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK:       # %bb.0: # %entry 
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vluxei32.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32( +    <vscale x 4 x float> poison, +    ptr %0, +    <vscale x 4 x i32> %1, +    iXLen %2) + +  ret <vscale x 4 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vluxei32.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32( +    <vscale x 8 x float> poison, +    ptr %0, +    <vscale x 8 x i32> %1, +    iXLen %2) + +  ret <vscale x 8 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i32>, +  iXLen); + +define <vscale x 16 x float> 
@intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vluxei32.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32( +    <vscale x 16 x float> poison, +    ptr %0, +    <vscale x 16 x i32> %1, +    iXLen %2) + +  ret <vscale x 16 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i32>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32( +    <vscale x 16 x float> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x float> %a +} + +declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vluxei32.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32( +    <vscale x 1 x double> poison, +    ptr %0, +    <vscale x 1 x i32> %1, +    iXLen %2) + +  ret <vscale x 1 x double> %a +} + +declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x double> %a +} + +declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vluxei32.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32( +    <vscale x 2 x 
double> poison, +    ptr %0, +    <vscale x 2 x i32> %1, +    iXLen %2) + +  ret <vscale x 2 x double> %a +} + +declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x double> %a +} + +declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv2r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vluxei32.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32( +    <vscale x 4 x double> poison, +    ptr %0, +    <vscale x 4 x i32> %1, +    iXLen %2) + +  ret <vscale x 4 x double> %a +} + +declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv4r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vluxei32.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32( +    <vscale x 8 x double> poison, +    ptr %0, +    <vscale x 8 x i32> %1, +    iXLen %2) + +  ret <vscale x 8 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 
x double> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x double> %a +} + +declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vluxei16.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16( +    <vscale x 1 x i8> poison, +    ptr %0, +    <vscale x 1 x i16> %1, +    iXLen %2) + +  ret <vscale x 1 x i8> %a +} + +declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i8> %a +} + +declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vluxei16.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16( +    <vscale x 2 x i8> poison, +    ptr %0, +    <vscale x 2 x i16> %1, +    iXLen %2) + +  ret <vscale x 2 x i8> %a +} + +declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i8> %a +} + +declare <vscale x 4 x i8> 
@llvm.riscv.vluxei.nxv4i8.nxv4i16( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vluxei16.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16( +    <vscale x 4 x i8> poison, +    ptr %0, +    <vscale x 4 x i16> %1, +    iXLen %2) + +  ret <vscale x 4 x i8> %a +} + +declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i8> %a +} + +declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vluxei16.v v10, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16( +    <vscale x 8 x i8> poison, +    ptr %0, +    <vscale x 8 x i16> %1, +    iXLen %2) + +  ret <vscale x 8 x i8> %a +} + +declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i8> %a +} + +declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i16>, +  iXLen); + +define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT:    vluxei16.v v12, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16( +    <vscale x 16 x i8> poison, +    ptr %0, +    <vscale 
x 16 x i16> %1, +    iXLen %2) + +  ret <vscale x 16 x i8> %a +} + +declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i16>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16( +    <vscale x 16 x i8> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x i8> %a +} + +declare <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16( +  <vscale x 32 x i8>, +  ptr, +  <vscale x 32 x i16>, +  iXLen); + +define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT:    vluxei16.v v16, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16( +    <vscale x 32 x i8> poison, +    ptr %0, +    <vscale x 32 x i16> %1, +    iXLen %2) + +  ret <vscale x 32 x i8> %a +} + +declare <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16( +  <vscale x 32 x i8>, +  ptr, +  <vscale x 32 x i16>, +  <vscale x 32 x i1>, +  iXLen, +  iXLen); + +define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16( +    <vscale x 32 x i8> %0, +    ptr %1, +    <vscale x 32 x i16> %2, +    <vscale x 32 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 32 x i8> %a +} + +declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16( +    <vscale x 1 x i16> poison, +    ptr %0, +    <vscale x 1 x i16> %1, +    iXLen %2) + +  ret <vscale x 1 x i16> %a +} + +declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu +; 
CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i16> %a +} + +declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16( +    <vscale x 2 x i16> poison, +    ptr %0, +    <vscale x 2 x i16> %1, +    iXLen %2) + +  ret <vscale x 2 x i16> %a +} + +declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i16> %a +} + +declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16( +    <vscale x 4 x i16> poison, +    ptr %0, +    <vscale x 4 x i16> %1, +    iXLen %2) + +  ret <vscale x 4 x i16> %a +} + +declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i16> %a +} + +declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16: +; 
CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16( +    <vscale x 8 x i16> poison, +    ptr %0, +    <vscale x 8 x i16> %1, +    iXLen %2) + +  ret <vscale x 8 x i16> %a +} + +declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i16> %a +} + +declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i16>, +  iXLen); + +define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16( +    <vscale x 16 x i16> poison, +    ptr %0, +    <vscale x 16 x i16> %1, +    iXLen %2) + +  ret <vscale x 16 x i16> %a +} + +declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i16>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16( +    <vscale x 16 x i16> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x i16> %a +} + +declare <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16( +  <vscale x 32 x i16>, +  ptr, +  <vscale x 32 x i16>, +  iXLen); + +define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16( +    <vscale x 32 x i16> poison, +    ptr %0, +    <vscale x 32 x i16> %1, +    iXLen %2) + +  ret <vscale x 32 x i16> %a +} + +declare <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16( +  <vscale x 32 x i16>, +  ptr, +  <vscale x 32 x i16>, +  <vscale x 32 x i1>, +  iXLen, +  iXLen); + +define <vscale x 32 x i16> 
@intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16( +    <vscale x 32 x i16> %0, +    ptr %1, +    <vscale x 32 x i16> %2, +    <vscale x 32 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 32 x i16> %a +} + +declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vluxei16.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16( +    <vscale x 1 x i32> poison, +    ptr %0, +    <vscale x 1 x i16> %1, +    iXLen %2) + +  ret <vscale x 1 x i32> %a +} + +declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i32> %a +} + +declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vluxei16.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16( +    <vscale x 2 x i32> poison, +    ptr %0, +    <vscale x 2 x i16> %1, +    iXLen %2) + +  ret <vscale x 2 x i32> %a +} + +declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, 
+    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i32> %a +} + +declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16( +    <vscale x 4 x i32> poison, +    ptr %0, +    <vscale x 4 x i16> %1, +    iXLen %2) + +  ret <vscale x 4 x i32> %a +} + +declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i32> %a +} + +declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv2r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16( +    <vscale x 8 x i32> poison, +    ptr %0, +    <vscale x 8 x i16> %1, +    iXLen %2) + +  ret <vscale x 8 x i32> %a +} + +declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i32> %a +} + +declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i16>, +  iXLen); + +define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 
1, e8, m1, ta, ma +; CHECK-NEXT:    vmv4r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16( +    <vscale x 16 x i32> poison, +    ptr %0, +    <vscale x 16 x i16> %1, +    iXLen %2) + +  ret <vscale x 16 x i32> %a +} + +declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i16>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16( +    <vscale x 16 x i32> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x i32> %a +} + +declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vluxei16.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16( +    <vscale x 1 x i64> poison, +    ptr %0, +    <vscale x 1 x i16> %1, +    iXLen %2) + +  ret <vscale x 1 x i64> %a +} + +declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i64> %a +} + +declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16( +    <vscale x 2 x i64> poison, +    ptr %0, +    <vscale x 2 x i16> %1, +    iXLen %2) + +  ret <vscale x 2 x i64> %a +} + +declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16( +  <vscale x 2 x i64>, +  ptr, +  <vscale 
x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i64> %a +} + +declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16( +    <vscale x 4 x i64> poison, +    ptr %0, +    <vscale x 4 x i16> %1, +    iXLen %2) + +  ret <vscale x 4 x i64> %a +} + +declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i64> %a +} + +declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv2r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16( +    <vscale x 8 x i64> poison, +    ptr %0, +    <vscale x 8 x i16> %1, +    iXLen %2) + +  ret <vscale x 8 x i64> %a +} + +declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret 
+entry: +  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i64> %a +} + +declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16( +    <vscale x 1 x half> poison, +    ptr %0, +    <vscale x 1 x i16> %1, +    iXLen %2) + +  ret <vscale x 1 x half> %a +} + +declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x half> %a +} + +declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16( +    <vscale x 2 x half> poison, +    ptr %0, +    <vscale x 2 x i16> %1, +    iXLen %2) + +  ret <vscale x 2 x half> %a +} + +declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x half> %a +} + +declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK:       # %bb.0: # %entry +; 
CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16( +    <vscale x 4 x half> poison, +    ptr %0, +    <vscale x 4 x i16> %1, +    iXLen %2) + +  ret <vscale x 4 x half> %a +} + +declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x half> %a +} + +declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16( +    <vscale x 8 x half> poison, +    ptr %0, +    <vscale x 8 x i16> %1, +    iXLen %2) + +  ret <vscale x 8 x half> %a +} + +declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x half> %a +} + +declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i16>, +  iXLen); + +define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16( +    <vscale x 16 x half> poison, +    ptr %0, +    <vscale x 16 x i16> %1, +    iXLen %2) + +  ret <vscale x 16 x half> %a +} + +declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i16>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 
16 x half> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16( +    <vscale x 16 x half> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x half> %a +} + +declare <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16( +  <vscale x 32 x half>, +  ptr, +  <vscale x 32 x i16>, +  iXLen); + +define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16( +    <vscale x 32 x half> poison, +    ptr %0, +    <vscale x 32 x i16> %1, +    iXLen %2) + +  ret <vscale x 32 x half> %a +} + +declare <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16( +  <vscale x 32 x half>, +  ptr, +  <vscale x 32 x i16>, +  <vscale x 32 x i1>, +  iXLen, +  iXLen); + +define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16( +    <vscale x 32 x half> %0, +    ptr %1, +    <vscale x 32 x i16> %2, +    <vscale x 32 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 32 x half> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vluxei16.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16( +    <vscale x 1 x float> poison, +    ptr %0, +    <vscale x 1 x i16> %1, +    iXLen %2) + +  ret <vscale x 1 x float> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen 
%4, iXLen 1) + +  ret <vscale x 1 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vluxei16.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16( +    <vscale x 2 x float> poison, +    ptr %0, +    <vscale x 2 x i16> %1, +    iXLen %2) + +  ret <vscale x 2 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16( +    <vscale x 4 x float> poison, +    ptr %0, +    <vscale x 4 x i16> %1, +    iXLen %2) + +  ret <vscale x 4 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma 
+; CHECK-NEXT:    vmv2r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16( +    <vscale x 8 x float> poison, +    ptr %0, +    <vscale x 8 x i16> %1, +    iXLen %2) + +  ret <vscale x 8 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i16>, +  iXLen); + +define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv4r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16( +    <vscale x 16 x float> poison, +    ptr %0, +    <vscale x 16 x i16> %1, +    iXLen %2) + +  ret <vscale x 16 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i16>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16( +    <vscale x 16 x float> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x float> %a +} + +declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vluxei16.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16( +    <vscale x 1 x double> poison, +    ptr %0, +    <vscale x 1 x i16> %1, +    iXLen %2) + +  ret <vscale x 1 x double> %a +} + +declare <vscale x 1 x double> 
@llvm.riscv.vluxei.mask.nxv1f64.nxv1i16( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x double> %a +} + +declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16( +    <vscale x 2 x double> poison, +    ptr %0, +    <vscale x 2 x i16> %1, +    iXLen %2) + +  ret <vscale x 2 x double> %a +} + +declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x double> %a +} + +declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16( +    <vscale x 4 x double> poison, +    ptr %0, +    <vscale x 4 x i16> %1, +    iXLen %2) + +  ret <vscale x 4 x double> %a +} + +declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv2r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vluxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16( +    <vscale x 8 x double> poison, +    ptr %0, +    <vscale x 8 x i16> %1, +    iXLen %2) + +  ret <vscale x 8 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x double> %a +} + +declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8( +    <vscale x 1 x i8> poison, +    ptr %0, +    <vscale x 1 x i8> %1, +    iXLen %2) + +  ret <vscale x 1 x i8> %a +} + +declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i8> %a +} + +declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8( +  <vscale x 2 x i8>, +  ptr, 
+  <vscale x 2 x i8>, +  iXLen); + +define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8( +    <vscale x 2 x i8> poison, +    ptr %0, +    <vscale x 2 x i8> %1, +    iXLen %2) + +  ret <vscale x 2 x i8> %a +} + +declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i8> %a +} + +declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8( +    <vscale x 4 x i8> poison, +    ptr %0, +    <vscale x 4 x i8> %1, +    iXLen %2) + +  ret <vscale x 4 x i8> %a +} + +declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i8> %a +} + +declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8( +    <vscale x 8 x i8> poison, +    ptr %0, +    <vscale x 8 x i8> %1, +    iXLen %2) + +  ret <vscale x 8 x i8> %a +} + +declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen, +  
iXLen); + +define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i8> %a +} + +declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i8>, +  iXLen); + +define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8( +    <vscale x 16 x i8> poison, +    ptr %0, +    <vscale x 16 x i8> %1, +    iXLen %2) + +  ret <vscale x 16 x i8> %a +} + +declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i8>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8( +    <vscale x 16 x i8> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x i8> %a +} + +declare <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8( +  <vscale x 32 x i8>, +  ptr, +  <vscale x 32 x i8>, +  iXLen); + +define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8( +    <vscale x 32 x i8> poison, +    ptr %0, +    <vscale x 32 x i8> %1, +    iXLen %2) + +  ret <vscale x 32 x i8> %a +} + +declare <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8( +  <vscale x 32 x i8>, +  ptr, +  <vscale x 32 x i8>, +  <vscale x 32 x i1>, +  iXLen, +  iXLen); + +define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8( +    <vscale x 32 x i8> %0, +    ptr %1, +    <vscale x 32 x i8> %2, +    <vscale x 32 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 32 x i8> %a +} + 
+declare <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8( +  <vscale x 64 x i8>, +  ptr, +  <vscale x 64 x i8>, +  iXLen); + +define <vscale x 64 x i8> @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(ptr %0, <vscale x 64 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v8 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8( +    <vscale x 64 x i8> poison, +    ptr %0, +    <vscale x 64 x i8> %1, +    iXLen %2) + +  ret <vscale x 64 x i8> %a +} + +declare <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8( +  <vscale x 64 x i8>, +  ptr, +  <vscale x 64 x i8>, +  <vscale x 64 x i1>, +  iXLen, +  iXLen); + +define <vscale x 64 x i8> @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8( +    <vscale x 64 x i8> %0, +    ptr %1, +    <vscale x 64 x i8> %2, +    <vscale x 64 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 64 x i8> %a +} + +declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vluxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8( +    <vscale x 1 x i16> poison, +    ptr %0, +    <vscale x 1 x i8> %1, +    iXLen %2) + +  ret <vscale x 1 x i16> %a +} + +declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i16> %a +} + +declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vluxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8( +    <vscale x 2 x i16> poison, +    ptr 
%0, +    <vscale x 2 x i8> %1, +    iXLen %2) + +  ret <vscale x 2 x i16> %a +} + +declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i16> %a +} + +declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vluxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8( +    <vscale x 4 x i16> poison, +    ptr %0, +    <vscale x 4 x i8> %1, +    iXLen %2) + +  ret <vscale x 4 x i16> %a +} + +declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i16> %a +} + +declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8( +    <vscale x 8 x i16> poison, +    ptr %0, +    <vscale x 8 x i8> %1, +    iXLen %2) + +  ret <vscale x 8 x i16> %a +} + +declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    
vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i16> %a +} + +declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i8>, +  iXLen); + +define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv2r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8( +    <vscale x 16 x i16> poison, +    ptr %0, +    <vscale x 16 x i8> %1, +    iXLen %2) + +  ret <vscale x 16 x i16> %a +} + +declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i8>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8( +    <vscale x 16 x i16> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x i16> %a +} + +declare <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8( +  <vscale x 32 x i16>, +  ptr, +  <vscale x 32 x i8>, +  iXLen); + +define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv4r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8( +    <vscale x 32 x i16> poison, +    ptr %0, +    <vscale x 32 x i8> %1, +    iXLen %2) + +  ret <vscale x 32 x i16> %a +} + +declare <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8( +  <vscale x 32 x i16>, +  ptr, +  <vscale x 32 x i8>, +  <vscale x 32 x i1>, +  iXLen, +  iXLen); + +define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8( +    <vscale x 32 x i16> %0, +    ptr %1, +    <vscale x 32 x i8> %2, +    <vscale x 32 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 32 x i16> %a +} + +declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8( 
+  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vluxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8( +    <vscale x 1 x i32> poison, +    ptr %0, +    <vscale x 1 x i8> %1, +    iXLen %2) + +  ret <vscale x 1 x i32> %a +} + +declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i32> %a +} + +declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vluxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8( +    <vscale x 2 x i32> poison, +    ptr %0, +    <vscale x 2 x i8> %1, +    iXLen %2) + +  ret <vscale x 2 x i32> %a +} + +declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i32> %a +} + +declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8( +    <vscale x 4 x i32> poison, +    
ptr %0, +    <vscale x 4 x i8> %1, +    iXLen %2) + +  ret <vscale x 4 x i32> %a +} + +declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i32> %a +} + +declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8( +    <vscale x 8 x i32> poison, +    ptr %0, +    <vscale x 8 x i8> %1, +    iXLen %2) + +  ret <vscale x 8 x i32> %a +} + +declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i32> %a +} + +declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i8>, +  iXLen); + +define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv2r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8( +    <vscale x 16 x i32> poison, +    ptr %0, +    <vscale x 16 x i8> %1, +    iXLen %2) + +  ret <vscale x 16 x i32> %a +} + +declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i8>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8( +    <vscale x 16 x i32> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x i32> %a +} + +declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vluxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8( +    <vscale x 1 x i64> poison, +    ptr %0, +    <vscale x 1 x i8> %1, +    iXLen %2) + +  ret <vscale x 1 x i64> %a +} + +declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x i64> %a +} + +declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8( +    <vscale x 2 x i64> poison, +    ptr %0, +    <vscale x 2 x i8> %1, +    iXLen %2) + +  ret <vscale x 2 x i64> %a +} + +declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x i64> %a +} + +declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8( +  <vscale x 4 x i64>, +  
ptr, +  <vscale x 4 x i8>, +  iXLen); + +define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8( +    <vscale x 4 x i64> poison, +    ptr %0, +    <vscale x 4 x i8> %1, +    iXLen %2) + +  ret <vscale x 4 x i64> %a +} + +declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x i64> %a +} + +declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8( +    <vscale x 8 x i64> poison, +    ptr %0, +    <vscale x 8 x i8> %1, +    iXLen %2) + +  ret <vscale x 8 x i64> %a +} + +declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x i64> %a +} + +declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vluxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8( +    
<vscale x 1 x half> poison, +    ptr %0, +    <vscale x 1 x i8> %1, +    iXLen %2) + +  ret <vscale x 1 x half> %a +} + +declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x half> %a +} + +declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vluxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8( +    <vscale x 2 x half> poison, +    ptr %0, +    <vscale x 2 x i8> %1, +    iXLen %2) + +  ret <vscale x 2 x half> %a +} + +declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x half> %a +} + +declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vluxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8( +    <vscale x 4 x half> poison, +    ptr %0, +    <vscale x 4 x i8> %1, +    iXLen %2) + +  ret <vscale x 4 x half> %a +} + +declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK:       # %bb.0: # %entry +; 
CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x half> %a +} + +declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8( +    <vscale x 8 x half> poison, +    ptr %0, +    <vscale x 8 x i8> %1, +    iXLen %2) + +  ret <vscale x 8 x half> %a +} + +declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x half> %a +} + +declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i8>, +  iXLen); + +define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv2r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8( +    <vscale x 16 x half> poison, +    ptr %0, +    <vscale x 16 x i8> %1, +    iXLen %2) + +  ret <vscale x 16 x half> %a +} + +declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i8>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8( +    <vscale x 16 x half> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x half> %a +} + +declare <vscale x 32 x half> 
@llvm.riscv.vluxei.nxv32f16.nxv32i8( +  <vscale x 32 x half>, +  ptr, +  <vscale x 32 x i8>, +  iXLen); + +define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv4r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8( +    <vscale x 32 x half> poison, +    ptr %0, +    <vscale x 32 x i8> %1, +    iXLen %2) + +  ret <vscale x 32 x half> %a +} + +declare <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8( +  <vscale x 32 x half>, +  ptr, +  <vscale x 32 x i8>, +  <vscale x 32 x i1>, +  iXLen, +  iXLen); + +define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8( +    <vscale x 32 x half> %0, +    ptr %1, +    <vscale x 32 x i8> %2, +    <vscale x 32 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 32 x half> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vluxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv1r.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8( +    <vscale x 1 x float> poison, +    ptr %0, +    <vscale x 1 x i8> %1, +    iXLen %2) + +  ret <vscale x 1 x float> %a +} + +declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vluxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; 
CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8( +    <vscale x 2 x float> poison, +    ptr %0, +    <vscale x 2 x i8> %1, +    iXLen %2) + +  ret <vscale x 2 x float> %a +} + +declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8( +    <vscale x 4 x float> poison, +    ptr %0, +    <vscale x 4 x i8> %1, +    iXLen %2) + +  ret <vscale x 4 x float> %a +} + +declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8( +    <vscale x 8 x float> poison, +    ptr %0, +    <vscale x 8 x i8> %1, +    iXLen %2) + +  ret <vscale x 8 x float> %a +} + +declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x float> 
@intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i8>, +  iXLen); + +define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv2r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8( +    <vscale x 16 x float> poison, +    ptr %0, +    <vscale x 16 x i8> %1, +    iXLen %2) + +  ret <vscale x 16 x float> %a +} + +declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i8>, +  <vscale x 16 x i1>, +  iXLen, +  iXLen); + +define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8( +    <vscale x 16 x float> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    <vscale x 16 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 16 x float> %a +} + +declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vluxei8.v v9, (a0), v8 +; CHECK-NEXT:    vmv.v.v v8, v9 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8( +    <vscale x 1 x double> poison, +    ptr %0, +    <vscale x 1 x i8> %1, +    iXLen %2) + +  ret <vscale x 1 x double> %a +} + +declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen, +  iXLen); + +define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 1 x double> 
@llvm.riscv.vluxei.mask.nxv1f64.nxv1i8( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 1 x double> %a +} + +declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v10, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8( +    <vscale x 2 x double> poison, +    ptr %0, +    <vscale x 2 x i8> %1, +    iXLen %2) + +  ret <vscale x 2 x double> %a +} + +declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen, +  iXLen); + +define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 2 x double> %a +} + +declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v12, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8( +    <vscale x 4 x double> poison, +    ptr %0, +    <vscale x 4 x i8> %1, +    iXLen %2) + +  ret <vscale x 4 x double> %a +} + +declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen, +  iXLen); + +define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 4 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define <vscale x 8 x double> 
@intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma +; CHECK-NEXT:    vmv1r.v v16, v8 +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vluxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8( +    <vscale x 8 x double> poison, +    ptr %0, +    <vscale x 8 x i8> %1, +    iXLen %2) + +  ret <vscale x 8 x double> %a +} + +declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen, +  iXLen); + +define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4, iXLen 1) + +  ret <vscale x 8 x double> %a +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei-rv64.ll new file mode 100644 index 0000000..4963d91 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei-rv64.ll @@ -0,0 +1,1293 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin -global-isel -verify-machineinstrs \ +; RUN:   < %s | FileCheck %s + +; The intrinsics are not supported with RV32. 
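+
+; For context, a minimal C-level sketch of the operation these intrinsics
+; express, assuming the riscv_vector.h (v1.0 naming) indexed-store intrinsics;
+; the wrapper name below is hypothetical and not part of this test:
+;
+;   #include <riscv_vector.h>
+;   // Store each byte of vs3 to base + bindex[i] for i < vl (ordered scatter).
+;   void scatter_i8(int8_t *base, vuint64m1_t bindex, vint8mf8_t vs3, size_t vl) {
+;     __riscv_vsoxei64_v_i8mf8(base, bindex, vs3, vl);
+;   }
+;
+; Such a call should lower to the vsetvli + vsoxei64.v sequence checked below.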
+ +declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i64( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i64( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i64( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i64( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i64( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i64( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64( +    <vscale x 2 x 
i16> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i64( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i64( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i64( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i64>, +  
i64); + +define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i64( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i64( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK:       # %bb.0: # %entry 
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i64( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i64( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i64( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    i64 
%3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i64( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i64( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i64( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define void 
@intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i64( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i64( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK:       # %bb.0: # %entry +; 
CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i64( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i64( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i64( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 
2 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i64( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i64( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i64( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i64>, +  i64); + 
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i64( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i64( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i64( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64); + +define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4) + +  ret void +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei.ll new file mode 100644 index 0000000..7ea2e17 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei.ll @@ -0,0 +1,4881 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \ +; RUN:   -global-isel -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ +; RUN:   -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i32( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void 
@llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i32( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i32( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i32( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen); 
+ +define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i32( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32( +    <vscale x 16 x i8> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i32>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32( +    <vscale x 16 x i8> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i32( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i32( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i32( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i32( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret 
+entry: +  call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i32( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32( +    <vscale x 16 x i16> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i32>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32( +    <vscale x 16 x i16> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i32( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i32( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void 
@llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i32( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i32( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i32( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i32>, +  iXLen); + +define void 
@intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32( +    <vscale x 16 x i32> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i32>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32( +    <vscale x 16 x i32> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i32( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i32( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i32( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i32( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i32( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void 
@llvm.riscv.vsoxei.nxv1f16.nxv1i32( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i32( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2f16.nxv2i32( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i32( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare 
void @llvm.riscv.vsoxei.nxv8f16.nxv8i32( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8f16.nxv8i32( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i32( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv16f16.nxv16i32( +    <vscale x 16 x half> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i32>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32( +    <vscale x 16 x half> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i32( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale 
x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i32( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i32( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i32( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK:       # %bb.0: # %entry +; 
CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i32( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32( +    <vscale x 16 x float> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i32>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32( +    <vscale x 16 x float> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i32( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  
call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i32( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i32( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i32( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8f64.nxv8i32( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void 
@llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i16( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i16( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i16( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale 
x 4 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i16( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i16( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16( +    <vscale x 16 x i8> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i16>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), 
v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16( +    <vscale x 16 x i8> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i16( +  <vscale x 32 x i8>, +  ptr, +  <vscale x 32 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16( +    <vscale x 32 x i8> %0, +    ptr %1, +    <vscale x 32 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16( +  <vscale x 32 x i8>, +  ptr, +  <vscale x 32 x i16>, +  <vscale x 32 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16( +    <vscale x 32 x i8> %0, +    ptr %1, +    <vscale x 32 x i16> %2, +    <vscale x 32 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i16( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i16( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void 
@llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i16( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i16( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i16( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i16>, +  iXLen); + +define void 
@intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16( +    <vscale x 16 x i16> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i16>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16( +    <vscale x 16 x i16> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i16( +  <vscale x 32 x i16>, +  ptr, +  <vscale x 32 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16( +    <vscale x 32 x i16> %0, +    ptr %1, +    <vscale x 32 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16( +  <vscale x 32 x i16>, +  ptr, +  <vscale x 32 x i16>, +  <vscale x 32 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16( +    <vscale x 32 x i16> %0, +    ptr %1, +    <vscale x 32 x i16> %2, +    <vscale x 32 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i16( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i16( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i16( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i16( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void 
@llvm.riscv.vsoxei.nxv8i32.nxv8i16( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i16( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16( +    <vscale x 16 x i32> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i16>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16( +    <vscale x 16 x i32> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i16( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret 
void +} + +declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i16( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i16( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i16( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x 
i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i16( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1f16.nxv1i16( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i16( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2f16.nxv2i16( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i16( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; 
CHECK-NEXT:    vsoxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4f16.nxv4i16( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i16( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8f16.nxv8i16( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i16( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv16f16.nxv16i16( +    <vscale x 16 x half> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i16>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16( +    <vscale x 16 x half> 
%0, +    ptr %1, +    <vscale x 16 x i16> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i16( +  <vscale x 32 x half>, +  ptr, +  <vscale x 32 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv32f16.nxv32i16( +    <vscale x 32 x half> %0, +    ptr %1, +    <vscale x 32 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16( +  <vscale x 32 x half>, +  ptr, +  <vscale x 32 x i16>, +  <vscale x 32 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16( +    <vscale x 32 x half> %0, +    ptr %1, +    <vscale x 32 x i16> %2, +    <vscale x 32 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i16( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i16( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16( +  <vscale x 2 x float>, +  ptr, +  
<vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i16( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i16( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i16( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale 
x 16 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16( +    <vscale x 16 x float> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i16>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16( +    <vscale x 16 x float> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i16( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1f64.nxv1i16( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i16( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK:       # %bb.0: # %entry +; 
CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i16( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i16( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i8( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8( +    <vscale x 1 x i8> %0, + 
   ptr %1, +    <vscale x 1 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i8( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i8( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i8( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x 
i8> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i8( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8( +    <vscale x 16 x i8> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i8>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8( +    <vscale x 16 x i8> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i8( +  <vscale x 32 x i8>, +  ptr, +  <vscale x 32 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8( +    <vscale x 32 x i8> %0, +    ptr %1, +    <vscale x 32 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8( +  <vscale x 32 x i8>, +  ptr, +  <vscale x 32 x i8>, +  <vscale x 32 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT:    vsoxei8.v 
v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8( +    <vscale x 32 x i8> %0, +    ptr %1, +    <vscale x 32 x i8> %2, +    <vscale x 32 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv64i8.nxv64i8( +  <vscale x 64 x i8>, +  ptr, +  <vscale x 64 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8( +    <vscale x 64 x i8> %0, +    ptr %1, +    <vscale x 64 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8( +  <vscale x 64 x i8>, +  ptr, +  <vscale x 64 x i8>, +  <vscale x 64 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8( +    <vscale x 64 x i8> %0, +    ptr %1, +    <vscale x 64 x i8> %2, +    <vscale x 64 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i8( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i8( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void 
@llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i8( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i8( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i8( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 
16 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8( +    <vscale x 16 x i16> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i8>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8( +    <vscale x 16 x i16> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i8( +  <vscale x 32 x i16>, +  ptr, +  <vscale x 32 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8( +    <vscale x 32 x i16> %0, +    ptr %1, +    <vscale x 32 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8( +  <vscale x 32 x i16>, +  ptr, +  <vscale x 32 x i8>, +  <vscale x 32 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8( +    <vscale x 32 x i16> %0, +    ptr %1, +    <vscale x 32 x i8> %2, +    <vscale x 32 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i8( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma 
+; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i8( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i8( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i8( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void 
@llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i8( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8( +    <vscale x 16 x i32> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i8>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8( +    <vscale x 16 x i32> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i8( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i8( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x 
i64> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i8( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i8( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, 
(a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i8( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1f16.nxv1i8( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i8( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2f16.nxv2i8( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i8( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4f16.nxv4i8( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8( +  
<vscale x 4 x half>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i8( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8f16.nxv8i8( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i8( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv16f16.nxv16i8( +    <vscale x 16 x half> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i8>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8( +    <vscale x 16 x half> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i8( +  <vscale x 32 x half>, +  ptr, +  <vscale x 32 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, 
<vscale x 32 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv32f16.nxv32i8( +    <vscale x 32 x half> %0, +    ptr %1, +    <vscale x 32 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8( +  <vscale x 32 x half>, +  ptr, +  <vscale x 32 x i8>, +  <vscale x 32 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8( +    <vscale x 32 x half> %0, +    ptr %1, +    <vscale x 32 x i8> %2, +    <vscale x 32 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i8( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i8( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma 
+; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i8( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i8( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i8( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8( +    <vscale x 16 x float> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    iXLen %3) + +  ret 
void +} + +declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i8>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8( +    <vscale x 16 x float> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i8( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i8( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i8( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define void 
@intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i8( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei-rv64.ll new file mode 100644 index 0000000..9bd272a --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei-rv64.ll @@ -0,0 +1,1310 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvfbfmin -global-isel -verify-machineinstrs \ +; RUN:   < %s | FileCheck %s + +; The intrinsics are not supported with RV32. 
+ +declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i64( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4) + +  ret void +} + +define void @intrinsic_vsuxei_allonesmask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_allonesmask_v_nxv1i8_nxv1i8_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> splat (i1 true), +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i64( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i64( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli 
zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i64( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i64( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    
i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i64( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i64( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i64( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, 
<vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i64( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i64( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i64( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v12 +; CHECK-NEXT:    
ret +entry: +  call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i64( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i64( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4) + +  ret void +} + +declare void 
@llvm.riscv.vsuxei.nxv2i64.nxv2i64( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i64( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i64( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; 
CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i64( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i64( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i64( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void 
@llvm.riscv.vsuxei.nxv4f16.nxv4i64( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i64( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i64( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4) + +  ret void +} + +declare void 
@llvm.riscv.vsuxei.nxv2f32.nxv2i64( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i64( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i64( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 
x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i64( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i64>, +  <vscale x 1 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i64> %2, +    <vscale x 1 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i64( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i64>, +  <vscale x 2 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i64> %2, +    <vscale x 2 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i64( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsuxei64.v 
v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i64>, +  <vscale x 4 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i64> %2, +    <vscale x 4 x i1> %3, +    i64 %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i64( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i64>, +  i64); + +define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    i64 %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i64>, +  <vscale x 8 x i1>, +  i64); + +define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i64> %2, +    <vscale x 8 x i1> %3, +    i64 %4) + +  ret void +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei.ll new file mode 100644 index 0000000..7cd1545 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei.ll @@ -0,0 +1,4881 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \ +; RUN:   -global-isel -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ +; RUN:   -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i32( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void 
@llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i32( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i32( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i32( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, 
iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i32( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32( +    <vscale x 16 x i8> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i32>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32( +    <vscale x 16 x i8> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i32( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, 
(a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i32( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i32( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i32( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void 
@llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i32( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32( +    <vscale x 16 x i16> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i32>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32( +    <vscale x 16 x i16> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i32( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i32( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define void 
@intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i32( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i32( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32: +; CHECK:       # %bb.0: 
# %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i32( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32( +    <vscale x 16 x i32> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i32>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32( +    <vscale x 16 x i32> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i32( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i32( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32( +    
<vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i32( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i32( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i32( +  <vscale x 1 
x half>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1f16.nxv1i32( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i32( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2f16.nxv2i32( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i32( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; 
CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i32( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8f16.nxv8i32( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i32( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv16f16.nxv16i32( +    <vscale x 16 x half> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i32>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32( +    <vscale x 16 x half> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i32( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    
vsuxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i32>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i32( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i32( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 
x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i32( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i32( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32( +    <vscale x 16 x float> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i32>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32( +    <vscale x 16 x float> %0, +    ptr %1, +    <vscale x 16 x i32> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i32( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i32>, +  
<vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i32> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i32( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2f64.nxv2i32( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i32>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i32> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i32( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i32>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i32> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i32( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i32>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, 
iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i32>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i32> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i16( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i16( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t +; 
CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i16( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i16( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i16( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16( +    <vscale x 16 x i8> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16( +  <vscale x 16 x i8>, +  
ptr, +  <vscale x 16 x i16>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16( +    <vscale x 16 x i8> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i16( +  <vscale x 32 x i8>, +  ptr, +  <vscale x 32 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16( +    <vscale x 32 x i8> %0, +    ptr %1, +    <vscale x 32 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16( +  <vscale x 32 x i8>, +  ptr, +  <vscale x 32 x i16>, +  <vscale x 32 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16( +    <vscale x 32 x i8> %0, +    ptr %1, +    <vscale x 32 x i16> %2, +    <vscale x 32 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i16( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i16( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x 
i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i16( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i16( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    
vsuxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i16( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16( +    <vscale x 16 x i16> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i16>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16( +    <vscale x 16 x i16> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i16( +  <vscale x 32 x i16>, +  ptr, +  <vscale x 32 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16( +    <vscale x 32 x i16> %0, +    ptr %1, +    <vscale x 32 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16( +  <vscale x 32 x i16>, +  ptr, +  <vscale x 32 x i16>, +  <vscale x 32 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16( +    <vscale x 32 x i16> %0, +    ptr %1, +    <vscale x 32 x i16> %2, +    <vscale x 32 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i16( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16( +    <vscale x 1 x i32> %0, +    ptr %1, +    
<vscale x 1 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i16( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i16( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i16( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i16>, +  
iXLen); + +define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i16( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16( +    <vscale x 16 x i32> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i16>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16( +    <vscale x 16 x i32> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i16( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i16( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i16( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i16( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void 
@llvm.riscv.vsuxei.nxv8i64.nxv8i16( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i16( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1f16.nxv1i16( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i16( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2f16.nxv2i16( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare 
void @llvm.riscv.vsuxei.nxv4f16.nxv4i16( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4f16.nxv4i16( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i16( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8f16.nxv8i16( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i16( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv16f16.nxv16i16( +    <vscale x 16 x half> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i16>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, 
ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16( +    <vscale x 16 x half> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i16( +  <vscale x 32 x half>, +  ptr, +  <vscale x 32 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv32f16.nxv32i16( +    <vscale x 32 x half> %0, +    ptr %1, +    <vscale x 32 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16( +  <vscale x 32 x half>, +  ptr, +  <vscale x 32 x i16>, +  <vscale x 32 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16( +    <vscale x 32 x half> %0, +    ptr %1, +    <vscale x 32 x i16> %2, +    <vscale x 32 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i16( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i16( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK:       
# %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i16( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i16( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void 
@llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i16( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16( +    <vscale x 16 x float> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i16>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16( +    <vscale x 16 x float> %0, +    ptr %1, +    <vscale x 16 x i16> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i16( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i16>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i16> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i16( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void 
@llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i16>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i16> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i16( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i16>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i16> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i16( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i16>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i16>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i16> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i8( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define void 
@intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8( +  <vscale x 1 x i8>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8( +    <vscale x 1 x i8> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i8( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8( +  <vscale x 2 x i8>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8( +    <vscale x 2 x i8> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i8( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8( +  <vscale x 4 x i8>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, 
(a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8( +    <vscale x 4 x i8> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i8( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8( +  <vscale x 8 x i8>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8( +    <vscale x 8 x i8> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i8( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8( +    <vscale x 16 x i8> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8( +  <vscale x 16 x i8>, +  ptr, +  <vscale x 16 x i8>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8( +    <vscale x 16 x i8> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i8( +  <vscale x 32 x i8>, +  ptr, +  <vscale x 32 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8( +    <vscale x 32 x i8> %0, +    ptr %1, +    <vscale x 32 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8( +  <vscale x 32 x 
i8>, +  ptr, +  <vscale x 32 x i8>, +  <vscale x 32 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8( +    <vscale x 32 x i8> %0, +    ptr %1, +    <vscale x 32 x i8> %2, +    <vscale x 32 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv64i8.nxv64i8( +  <vscale x 64 x i8>, +  ptr, +  <vscale x 64 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8( +    <vscale x 64 x i8> %0, +    ptr %1, +    <vscale x 64 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8( +  <vscale x 64 x i8>, +  ptr, +  <vscale x 64 x i8>, +  <vscale x 64 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8( +    <vscale x 64 x i8> %0, +    ptr %1, +    <vscale x 64 x i8> %2, +    <vscale x 64 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i8( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8( +  <vscale x 1 x i16>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8( +    <vscale x 1 x i16> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i8( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind { +; 
CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8( +  <vscale x 2 x i16>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8( +    <vscale x 2 x i16> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i8( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8( +  <vscale x 4 x i16>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8( +    <vscale x 4 x i16> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i8( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8( +  <vscale x 8 x i16>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void 
@llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8( +    <vscale x 8 x i16> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i8( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8( +    <vscale x 16 x i16> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8( +  <vscale x 16 x i16>, +  ptr, +  <vscale x 16 x i8>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8( +    <vscale x 16 x i16> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i8( +  <vscale x 32 x i16>, +  ptr, +  <vscale x 32 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8( +    <vscale x 32 x i16> %0, +    ptr %1, +    <vscale x 32 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8( +  <vscale x 32 x i16>, +  ptr, +  <vscale x 32 x i8>, +  <vscale x 32 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8( +    <vscale x 32 x i16> %0, +    ptr %1, +    <vscale x 32 x i8> %2, +    <vscale x 32 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i8( +  <vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8( +  
<vscale x 1 x i32>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8( +    <vscale x 1 x i32> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i8( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8( +  <vscale x 2 x i32>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8( +    <vscale x 2 x i32> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i8( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8( +  <vscale x 4 x i32>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8( +    <vscale x 4 x i32> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i8( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8( +  <vscale x 8 x i32>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8( +    <vscale x 8 x i32> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i8( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8( +    <vscale x 16 x i32> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8( +  <vscale x 16 x i32>, +  ptr, +  <vscale x 16 x i8>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8( +    <vscale x 16 x i32> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i8( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8( +  <vscale x 1 x i64>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: + 
 call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8( +    <vscale x 1 x i64> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i8( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8( +  <vscale x 2 x i64>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8( +    <vscale x 2 x i64> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i8( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8( +  <vscale x 4 x i64>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8( +    <vscale x 4 x i64> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i8( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8( +  <vscale x 8 x i64>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 
8 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8( +    <vscale x 8 x i64> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i8( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1f16.nxv1i8( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8( +  <vscale x 1 x half>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8( +    <vscale x 1 x half> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i8( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2f16.nxv2i8( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8( +  <vscale x 2 x half>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8( +    <vscale x 2 x half> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i8( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8: +; 
CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4f16.nxv4i8( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8( +  <vscale x 4 x half>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8( +    <vscale x 4 x half> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i8( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8f16.nxv8i8( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8( +  <vscale x 8 x half>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8( +    <vscale x 8 x half> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i8( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv16f16.nxv16i8( +    <vscale x 16 x half> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8( +  <vscale x 16 x half>, +  ptr, +  <vscale x 16 x i8>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void 
@llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8( +    <vscale x 16 x half> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i8( +  <vscale x 32 x half>, +  ptr, +  <vscale x 32 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv32f16.nxv32i8( +    <vscale x 32 x half> %0, +    ptr %1, +    <vscale x 32 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8( +  <vscale x 32 x half>, +  ptr, +  <vscale x 32 x i8>, +  <vscale x 32 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8( +    <vscale x 32 x half> %0, +    ptr %1, +    <vscale x 32 x i8> %2, +    <vscale x 32 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i8( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8( +  <vscale x 1 x float>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8( +    <vscale x 1 x float> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i8( +  <vscale x 2 x float>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8( +  <vscale x 
2 x float>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8( +    <vscale x 2 x float> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i8( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8( +  <vscale x 4 x float>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8( +    <vscale x 4 x float> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i8( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8( +  <vscale x 8 x float>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8( +    <vscale x 8 x float> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i8( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, 
iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8( +    <vscale x 16 x float> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8( +  <vscale x 16 x float>, +  ptr, +  <vscale x 16 x i8>, +  <vscale x 16 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8( +    <vscale x 16 x float> %0, +    ptr %1, +    <vscale x 16 x i8> %2, +    <vscale x 16 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i8( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8( +  <vscale x 1 x double>, +  ptr, +  <vscale x 1 x i8>, +  <vscale x 1 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8( +    <vscale x 1 x double> %0, +    ptr %1, +    <vscale x 1 x i8> %2, +    <vscale x 1 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i8( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v10 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8( +  <vscale x 2 x double>, +  ptr, +  <vscale x 2 x i8>, +  <vscale x 2 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma +; 
CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8( +    <vscale x 2 x double> %0, +    ptr %1, +    <vscale x 2 x i8> %2, +    <vscale x 2 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i8( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v12 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8( +  <vscale x 4 x double>, +  ptr, +  <vscale x 4 x i8>, +  <vscale x 4 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8( +    <vscale x 4 x double> %0, +    ptr %1, +    <vscale x 4 x i8> %2, +    <vscale x 4 x i1> %3, +    iXLen %4) + +  ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i8( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i8>, +  iXLen); + +define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v16 +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    iXLen %3) + +  ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8( +  <vscale x 8 x double>, +  ptr, +  <vscale x 8 x i8>, +  <vscale x 8 x i1>, +  iXLen); + +define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8: +; CHECK:       # %bb.0: # %entry +; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t +; CHECK-NEXT:    ret +entry: +  call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8( +    <vscale x 8 x double> %0, +    ptr %1, +    <vscale x 8 x i8> %2, +    <vscale x 8 x i1> %3, +    iXLen %4) + +  ret void +} diff --git a/llvm/test/CodeGen/RISCV/rv64-stackmap-fp.ll b/llvm/test/CodeGen/RISCV/rv64-stackmap-fp.ll new file mode 100644 index 0000000..bf0a2e5 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rv64-stackmap-fp.ll @@ -0,0 +1,41 @@ +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh < %s | FileCheck %s + +; CHECK-LABEL:  .section	.llvm_stackmaps +; CHECK-NEXT:  __LLVM_StackMaps: +; Header +; CHECK-NEXT:   .byte   3 +; CHECK-NEXT:   .byte   0 +; CHECK-NEXT:   .half   0 +; Num Functions +; CHECK-NEXT:   .word   1 +; Num LargeConstants +; CHECK-NEXT:   .word   0 +; Num Callsites +; CHECK-NEXT:   .word   1 + +; Functions and 
stack size +; CHECK-NEXT:   .quad   liveArgs +; CHECK-NEXT:   .quad   0 +; CHECK-NEXT:   .quad   1 + +; Spilled stack map values. +; +; Verify 3 stack map entries. +; +; CHECK-LABEL:  .word   .L{{.*}}-liveArgs +; CHECK-NEXT:   .half   0 +; CHECK-NEXT:   .half   25 +; +; Check that at least one is a spilled entry from SP. +; Location: Indirect SP + ... +; CHECK:        .byte   3 +; CHECK-NEXT:   .byte   0 +; CHECK-NEXT:   .half   8 +; CHECK-NEXT:   .half   2 +; CHECK-NEXT:   .half   0 +; CHECK-NEXT:   .word +define void @liveArgs(double %arg0, double %arg1, double %arg2, double %arg3, double %arg4, double %arg5, double %arg6, double %arg7, double %arg8, double %arg9, double %arg10, double %arg11, double %arg12, double %arg13, double %arg14, double %arg15, double %arg16, double %arg17, double %arg18, double %arg19, double %arg20, double %arg21, double %arg22, double %arg23, half %arg24, half %arg25, half %arg26, half %arg27, half %arg28, bfloat %arg29) { +entry: +  call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 11, i32 28, ptr null, i32 5, double %arg0, double %arg1, double %arg2, double %arg3, double %arg4, double %arg5, double %arg6, double %arg7, double %arg8, double %arg9, double %arg10, double %arg11, double %arg12, double %arg13, double %arg14, double %arg15, double %arg16, double %arg17, double %arg18, double %arg19, double %arg20, double %arg21, double %arg22, double %arg23, half %arg24, half %arg25, half %arg26, half %arg27, half %arg28, bfloat %arg29) +  ret void +} diff --git a/llvm/test/CodeGen/RISCV/rv64-stackmap.ll b/llvm/test/CodeGen/RISCV/rv64-stackmap.ll index c50a0fb3..320a3aa 100644 --- a/llvm/test/CodeGen/RISCV/rv64-stackmap.ll +++ b/llvm/test/CodeGen/RISCV/rv64-stackmap.ll @@ -286,8 +286,8 @@ define void @liveConstant() {  ; CHECK-NEXT:   .half   0  ; CHECK-NEXT:   .half   28  ; -; Check that at least one is a spilled entry from RBP. -; Location: Indirect RBP + ... +; Check that at least one is a spilled entry from SP. +; Location: Indirect SP + ...  
; CHECK:        .byte   3  ; CHECK-NEXT:   .byte   0  ; CHECK-NEXT:   .half   8 @@ -307,7 +307,7 @@ entry:  ; CHECK-NEXT:   .half   0  ; 1 location  ; CHECK-NEXT:   .half   1 -; Loc 0: Direct RBP - ofs +; Loc 0: Direct SP + ofs  ; CHECK-NEXT:   .byte   2  ; CHECK-NEXT:   .byte   0  ; CHECK-NEXT:   .half   8 @@ -320,14 +320,14 @@ entry:  ; CHECK-NEXT:   .half   0  ; 2 locations  ; CHECK-NEXT:   .half   2 -; Loc 0: Direct RBP - ofs +; Loc 0: Direct SP + ofs  ; CHECK-NEXT:   .byte   2  ; CHECK-NEXT:   .byte   0  ; CHECK-NEXT:   .half   8  ; CHECK-NEXT:   .half   2  ; CHECK-NEXT:   .half   0  ; CHECK-NEXT:   .word -; Loc 1: Direct RBP - ofs +; Loc 1: Direct SP + ofs  ; CHECK-NEXT:   .byte   2  ; CHECK-NEXT:   .byte   0  ; CHECK-NEXT:   .half   8 diff --git a/llvm/test/DebugInfo/debug-bool-const-value.ll b/llvm/test/DebugInfo/debug-bool-const-value.ll new file mode 100644 index 0000000..84cf993 --- /dev/null +++ b/llvm/test/DebugInfo/debug-bool-const-value.ll @@ -0,0 +1,29 @@ +; REQUIRES: object-emission +; RUN: %llc_dwarf %s -filetype=obj -o - | llvm-dwarfdump - | FileCheck %s + +; CHECK: {{.*}}DW_TAG_variable +; CHECK-NEXT: {{.*}} DW_AT_const_value     (1) +; CHECK-NEXT: {{.*}} DW_AT_name    ("arg") + +define void @test() !dbg !5 +{ +entry: +  call void @"llvm.dbg.value"(metadata i1 true, metadata !7, metadata !8), !dbg !6 +  ret void, !dbg !6 +} + +declare void @"llvm.dbg.value"(metadata %".1", metadata %".2", metadata %".3") + +!llvm.dbg.cu = !{ !2 } +!llvm.module.flags = !{ !9, !10 } + +!1 = !DIFile(directory: "", filename: "test") +!2 = distinct !DICompileUnit(emissionKind: FullDebug, file: !1, isOptimized: false, language: DW_LANG_C_plus_plus, runtimeVersion: 0) +!3 = !DIBasicType(encoding: DW_ATE_boolean, name: "bool", size: 8) +!4 = !DISubroutineType(types: !{null}) +!5 = distinct !DISubprogram(file: !1, isDefinition: true, isLocal: false, isOptimized: false, line: 5, linkageName: "test", name: "test", scope: !1, scopeLine: 5, type: !4, unit: !2) +!6 = !DILocation(column: 1, line: 5, scope: !5) +!7 = !DILocalVariable(arg: 0, file: !1, line: 5, name: "arg", scope: !5, type: !3) +!8 = !DIExpression() +!9 = !{ i32 2, !"Dwarf Version", i32 4 } +!10 = !{ i32 2, !"Debug Info Version", i32 3 } diff --git a/llvm/test/Other/new-pm-defaults.ll b/llvm/test/Other/new-pm-defaults.ll index 65b96c8..62975a3 100644 --- a/llvm/test/Other/new-pm-defaults.ll +++ b/llvm/test/Other/new-pm-defaults.ll @@ -208,6 +208,7 @@  ; CHECK-O-NEXT: Running analysis: DemandedBitsAnalysis  ; CHECK-O-NEXT: Running pass: InstCombinePass  ; CHECK-EP-PEEPHOLE-NEXT: Running pass: NoOpFunctionPass +; CHECK-O23SZ-NEXT: Running pass: DFAJumpThreadingPass  ; CHECK-O23SZ-NEXT: Running pass: JumpThreadingPass  ; CHECK-O23SZ-NEXT: Running analysis: LazyValueAnalysis  ; CHECK-O23SZ-NEXT: Running pass: CorrelatedValuePropagationPass diff --git a/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll index 3a0fffe..012a1ab 100644 --- a/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll +++ b/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll @@ -133,6 +133,7 @@  ; CHECK-O-NEXT: Running pass: BDCEPass  ; CHECK-O-NEXT: Running analysis: DemandedBitsAnalysis  ; CHECK-O-NEXT: Running pass: InstCombinePass +; CHECK-O23SZ-NEXT: Running pass: DFAJumpThreadingPass  ; CHECK-O23SZ-NEXT: Running pass: JumpThreadingPass  ; CHECK-O23SZ-NEXT: Running analysis: LazyValueAnalysis  ; CHECK-O23SZ-NEXT: Running pass: CorrelatedValuePropagationPass diff --git 
a/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll index 4623edc..e021ff3 100644 --- a/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll +++ b/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll @@ -118,6 +118,7 @@  ; CHECK-O-NEXT: Running pass: BDCEPass  ; CHECK-O-NEXT: Running analysis: DemandedBitsAnalysis  ; CHECK-O-NEXT: Running pass: InstCombinePass +; CHECK-O23SZ-NEXT: Running pass: DFAJumpThreadingPass  ; CHECK-O23SZ-NEXT: Running pass: JumpThreadingPass  ; CHECK-O23SZ-NEXT: Running analysis: LazyValueAnalysis  ; CHECK-O23SZ-NEXT: Running pass: CorrelatedValuePropagationPass diff --git a/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll index 590afd9..20f94bc 100644 --- a/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll +++ b/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll @@ -127,6 +127,7 @@  ; CHECK-O-NEXT: Running pass: BDCEPass  ; CHECK-O-NEXT: Running analysis: DemandedBitsAnalysis  ; CHECK-O-NEXT: Running pass: InstCombinePass +; CHECK-O23SZ-NEXT: Running pass: DFAJumpThreadingPass  ; CHECK-O23SZ-NEXT: Running pass: JumpThreadingPass  ; CHECK-O23SZ-NEXT: Running analysis: LazyValueAnalysis  ; CHECK-O23SZ-NEXT: Running pass: CorrelatedValuePropagationPass diff --git a/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll b/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll index dd6acd2..b61edc8 100644 --- a/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll +++ b/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll @@ -165,6 +165,7 @@  ; CHECK-O-NEXT: Running pass: BDCEPass  ; CHECK-O-NEXT: Running analysis: DemandedBitsAnalysis  ; CHECK-O-NEXT: Running pass: InstCombinePass +; CHECK-O23SZ-NEXT: Running pass: DFAJumpThreadingPass  ; CHECK-O23SZ-NEXT: Running pass: JumpThreadingPass  ; CHECK-O23SZ-NEXT: Running analysis: LazyValueAnalysis  ; CHECK-O23SZ-NEXT: Running pass: CorrelatedValuePropagationPass diff --git a/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll index ee05452..acf8c05 100644 --- a/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll +++ b/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll @@ -167,6 +167,7 @@  ; CHECK-O-NEXT: Running pass: BDCEPass  ; CHECK-O-NEXT: Running analysis: DemandedBitsAnalysis  ; CHECK-O-NEXT: Running pass: InstCombinePass +; CHECK-O23SZ-NEXT: Running pass: DFAJumpThreadingPass  ; CHECK-O23SZ-NEXT: Running pass: JumpThreadingPass  ; CHECK-O23SZ-NEXT: Running analysis: LazyValueAnalysis  ; CHECK-O23SZ-NEXT: Running pass: CorrelatedValuePropagationPass diff --git a/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll index fd95e94..6b3c5ca 100644 --- a/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll +++ b/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll @@ -131,6 +131,7 @@  ; CHECK-O-NEXT: Running pass: BDCEPass  ; CHECK-O-NEXT: Running analysis: DemandedBitsAnalysis  ; CHECK-O-NEXT: Running pass: InstCombinePass +; CHECK-O23SZ-NEXT: Running pass: DFAJumpThreadingPass  ; CHECK-O23SZ-NEXT: Running pass: JumpThreadingPass  ; CHECK-O23SZ-NEXT: Running analysis: LazyValueAnalysis  ; CHECK-O23SZ-NEXT: Running pass: CorrelatedValuePropagationPass diff --git a/llvm/test/Transforms/Attributor/ArgumentPromotion/X86/min-legal-vector-width.ll 
b/llvm/test/Transforms/Attributor/ArgumentPromotion/X86/min-legal-vector-width.ll index 649e946..fffe50f 100644 --- a/llvm/test/Transforms/Attributor/ArgumentPromotion/X86/min-legal-vector-width.ll +++ b/llvm/test/Transforms/Attributor/ArgumentPromotion/X86/min-legal-vector-width.ll @@ -9,15 +9,25 @@ target triple = "x86_64-unknown-linux-gnu"  ; This should promote  define internal fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer512(ptr %arg, ptr readonly %arg1) #0 {  ; -; CHECK: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable -; CHECK-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer512_call_avx512_legal512_prefer512 -; CHECK-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT:  bb: -; CHECK-NEXT:    [[ARG1_PRIV:%.*]] = alloca <8 x i64>, align 64 -; CHECK-NEXT:    store <8 x i64> [[TMP0]], ptr [[ARG1_PRIV]], align 64 -; CHECK-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1_PRIV]], align 64 -; CHECK-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 -; CHECK-NEXT:    ret void +; TUNIT: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable +; TUNIT-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer512_call_avx512_legal512_prefer512 +; TUNIT-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]]) #[[ATTR0:[0-9]+]] { +; TUNIT-NEXT:  bb: +; TUNIT-NEXT:    [[ARG1_PRIV:%.*]] = alloca <8 x i64>, align 64 +; TUNIT-NEXT:    store <8 x i64> [[TMP0]], ptr [[ARG1_PRIV]], align 64 +; TUNIT-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1_PRIV]], align 64 +; TUNIT-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 +; TUNIT-NEXT:    ret void +; +; CGSCC: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable +; CGSCC-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer512_call_avx512_legal512_prefer512 +; CGSCC-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]]) #[[ATTR0:[0-9]+]] { +; CGSCC-NEXT:  bb: +; CGSCC-NEXT:    [[ARG1_PRIV:%.*]] = alloca <8 x i64>, align 64 +; CGSCC-NEXT:    store <8 x i64> [[TMP0]], ptr [[ARG1_PRIV]], align 64 +; CGSCC-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1_PRIV]], align 64, !invariant.load [[META0:![0-9]+]] +; CGSCC-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 +; CGSCC-NEXT:    ret void  ;  bb:    %tmp = load <8 x i64>, ptr %arg1 @@ -66,15 +76,25 @@ bb:  ; This should promote  define internal fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer256(ptr %arg, ptr readonly %arg1) #1 {  ; -; CHECK: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable -; CHECK-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer256_call_avx512_legal512_prefer256 -; CHECK-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]]) #[[ATTR1:[0-9]+]] { -; CHECK-NEXT:  bb: -; CHECK-NEXT:    [[ARG1_PRIV:%.*]] = alloca <8 x i64>, align 64 -; CHECK-NEXT:    store <8 x i64> [[TMP0]], ptr [[ARG1_PRIV]], align 64 -; CHECK-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1_PRIV]], align 64 -; CHECK-NEXT:    store <8 x i64> 
[[TMP]], ptr [[ARG]], align 64 -; CHECK-NEXT:    ret void +; TUNIT: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable +; TUNIT-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer256_call_avx512_legal512_prefer256 +; TUNIT-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]]) #[[ATTR1:[0-9]+]] { +; TUNIT-NEXT:  bb: +; TUNIT-NEXT:    [[ARG1_PRIV:%.*]] = alloca <8 x i64>, align 64 +; TUNIT-NEXT:    store <8 x i64> [[TMP0]], ptr [[ARG1_PRIV]], align 64 +; TUNIT-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1_PRIV]], align 64 +; TUNIT-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 +; TUNIT-NEXT:    ret void +; +; CGSCC: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable +; CGSCC-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer256_call_avx512_legal512_prefer256 +; CGSCC-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]]) #[[ATTR1:[0-9]+]] { +; CGSCC-NEXT:  bb: +; CGSCC-NEXT:    [[ARG1_PRIV:%.*]] = alloca <8 x i64>, align 64 +; CGSCC-NEXT:    store <8 x i64> [[TMP0]], ptr [[ARG1_PRIV]], align 64 +; CGSCC-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1_PRIV]], align 64, !invariant.load [[META0]] +; CGSCC-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 +; CGSCC-NEXT:    ret void  ;  bb:    %tmp = load <8 x i64>, ptr %arg1 @@ -123,15 +143,25 @@ bb:  ; This should promote  define internal fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer256(ptr %arg, ptr readonly %arg1) #1 {  ; -; CHECK: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable -; CHECK-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer512_call_avx512_legal512_prefer256 -; CHECK-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]]) #[[ATTR1]] { -; CHECK-NEXT:  bb: -; CHECK-NEXT:    [[ARG1_PRIV:%.*]] = alloca <8 x i64>, align 64 -; CHECK-NEXT:    store <8 x i64> [[TMP0]], ptr [[ARG1_PRIV]], align 64 -; CHECK-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1_PRIV]], align 64 -; CHECK-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 -; CHECK-NEXT:    ret void +; TUNIT: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable +; TUNIT-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer512_call_avx512_legal512_prefer256 +; TUNIT-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]]) #[[ATTR1]] { +; TUNIT-NEXT:  bb: +; TUNIT-NEXT:    [[ARG1_PRIV:%.*]] = alloca <8 x i64>, align 64 +; TUNIT-NEXT:    store <8 x i64> [[TMP0]], ptr [[ARG1_PRIV]], align 64 +; TUNIT-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1_PRIV]], align 64 +; TUNIT-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 +; TUNIT-NEXT:    ret void +; +; CGSCC: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable +; CGSCC-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer512_call_avx512_legal512_prefer256 +; CGSCC-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]]) #[[ATTR1]] { +; CGSCC-NEXT:  bb: +; 
CGSCC-NEXT:    [[ARG1_PRIV:%.*]] = alloca <8 x i64>, align 64 +; CGSCC-NEXT:    store <8 x i64> [[TMP0]], ptr [[ARG1_PRIV]], align 64 +; CGSCC-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1_PRIV]], align 64, !invariant.load [[META0]] +; CGSCC-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 +; CGSCC-NEXT:    ret void  ;  bb:    %tmp = load <8 x i64>, ptr %arg1 @@ -180,15 +210,25 @@ bb:  ; This should promote  define internal fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer512(ptr %arg, ptr readonly %arg1) #0 {  ; -; CHECK: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable -; CHECK-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer256_call_avx512_legal512_prefer512 -; CHECK-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]]) #[[ATTR0]] { -; CHECK-NEXT:  bb: -; CHECK-NEXT:    [[ARG1_PRIV:%.*]] = alloca <8 x i64>, align 64 -; CHECK-NEXT:    store <8 x i64> [[TMP0]], ptr [[ARG1_PRIV]], align 64 -; CHECK-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1_PRIV]], align 64 -; CHECK-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 -; CHECK-NEXT:    ret void +; TUNIT: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable +; TUNIT-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer256_call_avx512_legal512_prefer512 +; TUNIT-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]]) #[[ATTR0]] { +; TUNIT-NEXT:  bb: +; TUNIT-NEXT:    [[ARG1_PRIV:%.*]] = alloca <8 x i64>, align 64 +; TUNIT-NEXT:    store <8 x i64> [[TMP0]], ptr [[ARG1_PRIV]], align 64 +; TUNIT-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1_PRIV]], align 64 +; TUNIT-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 +; TUNIT-NEXT:    ret void +; +; CGSCC: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable +; CGSCC-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer256_call_avx512_legal512_prefer512 +; CGSCC-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]]) #[[ATTR0]] { +; CGSCC-NEXT:  bb: +; CGSCC-NEXT:    [[ARG1_PRIV:%.*]] = alloca <8 x i64>, align 64 +; CGSCC-NEXT:    store <8 x i64> [[TMP0]], ptr [[ARG1_PRIV]], align 64 +; CGSCC-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1_PRIV]], align 64, !invariant.load [[META0]] +; CGSCC-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 +; CGSCC-NEXT:    ret void  ;  bb:    %tmp = load <8 x i64>, ptr %arg1 @@ -237,13 +277,21 @@ bb:  ; This should not promote  define internal fastcc void @callee_avx512_legal256_prefer256_call_avx512_legal512_prefer256(ptr %arg, ptr readonly %arg1) #1 {  ; -; CHECK: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable -; CHECK-LABEL: define {{[^@]+}}@callee_avx512_legal256_prefer256_call_avx512_legal512_prefer256 -; CHECK-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], ptr noalias nofree noundef nonnull readonly align 64 captures(none) dereferenceable(64) [[ARG1:%.*]]) #[[ATTR1]] { -; CHECK-NEXT:  bb: -; CHECK-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1]], align 64 -; CHECK-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 -; 
CHECK-NEXT:    ret void +; TUNIT: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable +; TUNIT-LABEL: define {{[^@]+}}@callee_avx512_legal256_prefer256_call_avx512_legal512_prefer256 +; TUNIT-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], ptr noalias nofree noundef nonnull readonly align 64 captures(none) dereferenceable(64) [[ARG1:%.*]]) #[[ATTR1]] { +; TUNIT-NEXT:  bb: +; TUNIT-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1]], align 64 +; TUNIT-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 +; TUNIT-NEXT:    ret void +; +; CGSCC: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable +; CGSCC-LABEL: define {{[^@]+}}@callee_avx512_legal256_prefer256_call_avx512_legal512_prefer256 +; CGSCC-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], ptr noalias nofree noundef nonnull readonly align 64 captures(none) dereferenceable(64) [[ARG1:%.*]]) #[[ATTR1]] { +; CGSCC-NEXT:  bb: +; CGSCC-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1]], align 64, !invariant.load [[META0]] +; CGSCC-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 +; CGSCC-NEXT:    ret void  ;  bb:    %tmp = load <8 x i64>, ptr %arg1 @@ -290,13 +338,21 @@ bb:  ; This should not promote  define internal fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal256_prefer256(ptr %arg, ptr readonly %arg1) #2 {  ; -; CHECK: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable -; CHECK-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer256_call_avx512_legal256_prefer256 -; CHECK-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], ptr noalias nofree noundef nonnull readonly align 64 captures(none) dereferenceable(64) [[ARG1:%.*]]) #[[ATTR2:[0-9]+]] { -; CHECK-NEXT:  bb: -; CHECK-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1]], align 64 -; CHECK-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 -; CHECK-NEXT:    ret void +; TUNIT: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable +; TUNIT-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer256_call_avx512_legal256_prefer256 +; TUNIT-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], ptr noalias nofree noundef nonnull readonly align 64 captures(none) dereferenceable(64) [[ARG1:%.*]]) #[[ATTR2]] { +; TUNIT-NEXT:  bb: +; TUNIT-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1]], align 64 +; TUNIT-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 +; TUNIT-NEXT:    ret void +; +; CGSCC: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable +; CGSCC-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer256_call_avx512_legal256_prefer256 +; CGSCC-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], ptr noalias nofree noundef nonnull readonly align 64 captures(none) dereferenceable(64) [[ARG1:%.*]]) #[[ATTR2]] { +; CGSCC-NEXT:  bb: +; CGSCC-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1]], align 64, !invariant.load [[META0]] +; CGSCC-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 +; CGSCC-NEXT:    ret void  ;  bb:    %tmp = load <8 x 
i64>, ptr %arg1 @@ -343,15 +399,25 @@ bb:  ; This should promote  define internal fastcc void @callee_avx2_legal256_prefer256_call_avx2_legal512_prefer256(ptr %arg, ptr readonly %arg1) #3 {  ; -; CHECK: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable -; CHECK-LABEL: define {{[^@]+}}@callee_avx2_legal256_prefer256_call_avx2_legal512_prefer256 -; CHECK-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] { -; CHECK-NEXT:  bb: -; CHECK-NEXT:    [[ARG1_PRIV:%.*]] = alloca <8 x i64>, align 64 -; CHECK-NEXT:    store <8 x i64> [[TMP0]], ptr [[ARG1_PRIV]], align 64 -; CHECK-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1_PRIV]], align 64 -; CHECK-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 -; CHECK-NEXT:    ret void +; TUNIT: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable +; TUNIT-LABEL: define {{[^@]+}}@callee_avx2_legal256_prefer256_call_avx2_legal512_prefer256 +; TUNIT-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] { +; TUNIT-NEXT:  bb: +; TUNIT-NEXT:    [[ARG1_PRIV:%.*]] = alloca <8 x i64>, align 64 +; TUNIT-NEXT:    store <8 x i64> [[TMP0]], ptr [[ARG1_PRIV]], align 64 +; TUNIT-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1_PRIV]], align 64 +; TUNIT-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 +; TUNIT-NEXT:    ret void +; +; CGSCC: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable +; CGSCC-LABEL: define {{[^@]+}}@callee_avx2_legal256_prefer256_call_avx2_legal512_prefer256 +; CGSCC-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]]) #[[ATTR3:[0-9]+]] { +; CGSCC-NEXT:  bb: +; CGSCC-NEXT:    [[ARG1_PRIV:%.*]] = alloca <8 x i64>, align 64 +; CGSCC-NEXT:    store <8 x i64> [[TMP0]], ptr [[ARG1_PRIV]], align 64 +; CGSCC-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1_PRIV]], align 64, !invariant.load [[META0]] +; CGSCC-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 +; CGSCC-NEXT:    ret void  ;  bb:    %tmp = load <8 x i64>, ptr %arg1 @@ -400,15 +466,25 @@ bb:  ; This should promote  define internal fastcc void @callee_avx2_legal512_prefer256_call_avx2_legal256_prefer256(ptr %arg, ptr readonly %arg1) #4 {  ; -; CHECK: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable -; CHECK-LABEL: define {{[^@]+}}@callee_avx2_legal512_prefer256_call_avx2_legal256_prefer256 -; CHECK-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]]) #[[ATTR3]] { -; CHECK-NEXT:  bb: -; CHECK-NEXT:    [[ARG1_PRIV:%.*]] = alloca <8 x i64>, align 64 -; CHECK-NEXT:    store <8 x i64> [[TMP0]], ptr [[ARG1_PRIV]], align 64 -; CHECK-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1_PRIV]], align 64 -; CHECK-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 -; CHECK-NEXT:    ret void +; TUNIT: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable +; TUNIT-LABEL: define {{[^@]+}}@callee_avx2_legal512_prefer256_call_avx2_legal256_prefer256 +; TUNIT-SAME: (ptr noalias nofree noundef nonnull writeonly 
align 64 captures(none) dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]]) #[[ATTR3]] { +; TUNIT-NEXT:  bb: +; TUNIT-NEXT:    [[ARG1_PRIV:%.*]] = alloca <8 x i64>, align 64 +; TUNIT-NEXT:    store <8 x i64> [[TMP0]], ptr [[ARG1_PRIV]], align 64 +; TUNIT-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1_PRIV]], align 64 +; TUNIT-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 +; TUNIT-NEXT:    ret void +; +; CGSCC: Function Attrs: inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable +; CGSCC-LABEL: define {{[^@]+}}@callee_avx2_legal512_prefer256_call_avx2_legal256_prefer256 +; CGSCC-SAME: (ptr noalias nofree noundef nonnull writeonly align 64 captures(none) dereferenceable(64) [[ARG:%.*]], <8 x i64> [[TMP0:%.*]]) #[[ATTR3]] { +; CGSCC-NEXT:  bb: +; CGSCC-NEXT:    [[ARG1_PRIV:%.*]] = alloca <8 x i64>, align 64 +; CGSCC-NEXT:    store <8 x i64> [[TMP0]], ptr [[ARG1_PRIV]], align 64 +; CGSCC-NEXT:    [[TMP:%.*]] = load <8 x i64>, ptr [[ARG1_PRIV]], align 64, !invariant.load [[META0]] +; CGSCC-NEXT:    store <8 x i64> [[TMP]], ptr [[ARG]], align 64 +; CGSCC-NEXT:    ret void  ;  bb:    %tmp = load <8 x i64>, ptr %arg1 @@ -464,6 +540,14 @@ attributes #3 = { inlinehint norecurse nounwind uwtable "target-features"="+avx2  attributes #4 = { inlinehint norecurse nounwind uwtable "target-features"="+avx2" "min-legal-vector-width"="256" "prefer-vector-width"="256" }  attributes #5 = { argmemonly nounwind }  ;. +; CGSCC: attributes #[[ATTR0]] = { inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable "min-legal-vector-width"="512" "prefer-vector-width"="512" "target-features"="+avx512vl" } +; CGSCC: attributes #[[ATTR1]] = { inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable "min-legal-vector-width"="512" "prefer-vector-width"="256" "target-features"="+avx512vl" } +; CGSCC: attributes #[[ATTR2]] = { inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable "min-legal-vector-width"="256" "prefer-vector-width"="256" "target-features"="+avx512vl" } +; CGSCC: attributes #[[ATTR3]] = { inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable "min-legal-vector-width"="512" "prefer-vector-width"="256" "target-features"="+avx2" } +; CGSCC: attributes #[[ATTR4:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: write) } +; CGSCC: attributes #[[ATTR5]] = { nofree willreturn memory(write) } +; CGSCC: attributes #[[ATTR6]] = { nofree nounwind willreturn } +;.  ; TUNIT: attributes #[[ATTR0]] = { inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable "min-legal-vector-width"="512" "prefer-vector-width"="512" "target-features"="+avx512vl" }  ; TUNIT: attributes #[[ATTR1]] = { inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable "min-legal-vector-width"="512" "prefer-vector-width"="256" "target-features"="+avx512vl" }  ; TUNIT: attributes #[[ATTR2]] = { inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable "min-legal-vector-width"="256" "prefer-vector-width"="256" "target-features"="+avx512vl" } @@ -472,11 +556,7 @@ attributes #5 = { argmemonly nounwind }  ; TUNIT: attributes #[[ATTR5]] = { nofree willreturn memory(write) }  ; TUNIT: attributes #[[ATTR6]] = { nofree nosync nounwind willreturn }  ;. 
-; CGSCC: attributes #[[ATTR0]] = { inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable "min-legal-vector-width"="512" "prefer-vector-width"="512" "target-features"="+avx512vl" } -; CGSCC: attributes #[[ATTR1]] = { inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable "min-legal-vector-width"="512" "prefer-vector-width"="256" "target-features"="+avx512vl" } -; CGSCC: attributes #[[ATTR2]] = { inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable "min-legal-vector-width"="256" "prefer-vector-width"="256" "target-features"="+avx512vl" } -; CGSCC: attributes #[[ATTR3]] = { inlinehint mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite) uwtable "min-legal-vector-width"="512" "prefer-vector-width"="256" "target-features"="+avx2" } -; CGSCC: attributes #[[ATTR4:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: write) } -; CGSCC: attributes #[[ATTR5]] = { nofree willreturn memory(write) } -; CGSCC: attributes #[[ATTR6]] = { nofree nounwind willreturn } +; CGSCC: [[META0]] = !{}  ;. +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; CHECK: {{.*}} diff --git a/llvm/test/Transforms/Attributor/align-ptrmask.ll b/llvm/test/Transforms/Attributor/align-ptrmask.ll new file mode 100644 index 0000000..008f5e1 --- /dev/null +++ b/llvm/test/Transforms/Attributor/align-ptrmask.ll @@ -0,0 +1,206 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -passes=attributor -S < %s | FileCheck %s + +define ptr @align_ptrmask_back_no_prop(ptr align 2 %x, i1 %cmp1, i1 %cmp2) { +; CHECK-LABEL: define noundef nonnull align 8 dereferenceable(4) ptr @align_ptrmask_back_no_prop( +; CHECK-SAME: ptr nofree writeonly align 2 [[X:%.*]], i1 [[CMP1:%.*]], i1 [[CMP2:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP1]], i64 -32, i64 -8 +; CHECK-NEXT:    [[SEL1:%.*]] = select i1 [[CMP2]], i64 [[SEL]], i64 -16 +; CHECK-NEXT:    [[P:%.*]] = tail call noundef nonnull align 8 dereferenceable(4) ptr @llvm.ptrmask.p0.i64(ptr [[X]], i64 noundef [[SEL1]]) #[[ATTR4:[0-9]+]] +; CHECK-NEXT:    store float 1.000000e+00, ptr [[P]], align 8 +; CHECK-NEXT:    ret ptr [[P]] +; +  %sel = select i1 %cmp1, i64 -32, i64 -8 +  %sel1 = select i1 %cmp2, i64 %sel, i64 -16 +  %p = tail call ptr @llvm.ptrmask.p0.i64(ptr %x, i64 %sel1) +  store float 1.0, ptr %p, align 8 +  ret ptr %p +} + +define ptr @align_ptrmask_back_prop(ptr align 2 %x, i1 %cmp1, i1 %cmp2) { +; CHECK-LABEL: define noundef nonnull align 16 dereferenceable(4) ptr @align_ptrmask_back_prop( +; CHECK-SAME: ptr nofree writeonly align 16 [[X:%.*]], i1 [[CMP1:%.*]], i1 [[CMP2:%.*]]) #[[ATTR0]] { +; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP1]], i64 -32, i64 -8 +; CHECK-NEXT:    [[SEL1:%.*]] = select i1 [[CMP2]], i64 [[SEL]], i64 -16 +; CHECK-NEXT:    [[P:%.*]] = tail call noundef nonnull align 16 dereferenceable(4) ptr @llvm.ptrmask.p0.i64(ptr [[X]], i64 noundef [[SEL1]]) #[[ATTR4]] +; CHECK-NEXT:    store float 1.000000e+00, ptr [[P]], align 16 +; CHECK-NEXT:    ret ptr [[P]] +; +  %sel = select i1 %cmp1, i64 -32, i64 -8 +  %sel1 = select i1 %cmp2, i64 %sel, i64 -16 +  %p = tail call ptr @llvm.ptrmask.p0.i64(ptr %x, i64 %sel1) +  store float 1.0, ptr %p, align 16 +  ret ptr %p +} + +define ptr @align_ptrmask_forward_mask(ptr align 2 %x, i1 %cmp1, i1 %cmp2) { +; 
CHECK-LABEL: define align 8 ptr @align_ptrmask_forward_mask( +; CHECK-SAME: ptr nofree readnone align 2 [[X:%.*]], i1 [[CMP1:%.*]], i1 [[CMP2:%.*]]) #[[ATTR1:[0-9]+]] { +; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP1]], i64 -32, i64 -8 +; CHECK-NEXT:    [[SEL1:%.*]] = select i1 [[CMP2]], i64 [[SEL]], i64 -16 +; CHECK-NEXT:    [[P:%.*]] = tail call align 8 ptr @llvm.ptrmask.p0.i64(ptr [[X]], i64 noundef [[SEL1]]) #[[ATTR4]] +; CHECK-NEXT:    ret ptr [[P]] +; +  %sel = select i1 %cmp1, i64 -32, i64 -8 +  %sel1 = select i1 %cmp2, i64 %sel, i64 -16 +  %p = tail call ptr @llvm.ptrmask.p0.i64(ptr %x, i64 %sel1) +  ret ptr %p +} + +define ptr @align_ptrmask_forward_ptr(ptr align 16 %x, i1 %cmp1, i1 %cmp2) { +; CHECK-LABEL: define align 16 ptr @align_ptrmask_forward_ptr( +; CHECK-SAME: ptr nofree readnone align 16 [[X:%.*]], i1 [[CMP1:%.*]], i1 [[CMP2:%.*]]) #[[ATTR1]] { +; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP1]], i64 -32, i64 -8 +; CHECK-NEXT:    [[SEL1:%.*]] = select i1 [[CMP2]], i64 [[SEL]], i64 -16 +; CHECK-NEXT:    [[P:%.*]] = tail call align 16 ptr @llvm.ptrmask.p0.i64(ptr [[X]], i64 noundef [[SEL1]]) #[[ATTR4]] +; CHECK-NEXT:    ret ptr [[P]] +; +  %sel = select i1 %cmp1, i64 -32, i64 -8 +  %sel1 = select i1 %cmp2, i64 %sel, i64 -16 +  %p = tail call ptr @llvm.ptrmask.p0.i64(ptr %x, i64 %sel1) +  ret ptr %p +} + +define ptr @align_ptrmask_forward_nonconst_mask(ptr align 8 %x, i64 %y, i1 %cmp1, i1 %cmp2) { +; CHECK-LABEL: define align 8 ptr @align_ptrmask_forward_nonconst_mask( +; CHECK-SAME: ptr nofree readnone align 8 [[X:%.*]], i64 [[Y:%.*]], i1 [[CMP1:%.*]], i1 [[CMP2:%.*]]) #[[ATTR1]] { +; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP1]], i64 -32, i64 [[Y]] +; CHECK-NEXT:    [[SEL1:%.*]] = select i1 [[CMP2]], i64 [[SEL]], i64 -16 +; CHECK-NEXT:    [[P:%.*]] = tail call align 8 ptr @llvm.ptrmask.p0.i64(ptr [[X]], i64 [[SEL1]]) #[[ATTR4]] +; CHECK-NEXT:    ret ptr [[P]] +; +  %sel = select i1 %cmp1, i64 -32, i64 %y +  %sel1 = select i1 %cmp2, i64 %sel, i64 -16 +  %p = tail call ptr @llvm.ptrmask.p0.i64(ptr %x, i64 %sel1) +  ret ptr %p +} + +define ptr @align_ptrmask_back_nonconst_mask(ptr align 4 %x, i64 %y, i1 %cmp1, i1 %cmp2) { +; CHECK-LABEL: define noundef nonnull align 8 dereferenceable(4) ptr @align_ptrmask_back_nonconst_mask( +; CHECK-SAME: ptr nofree writeonly align 8 [[X:%.*]], i64 [[Y:%.*]], i1 [[CMP1:%.*]], i1 [[CMP2:%.*]]) #[[ATTR0]] { +; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP1]], i64 -32, i64 [[Y]] +; CHECK-NEXT:    [[SEL1:%.*]] = select i1 [[CMP2]], i64 [[SEL]], i64 -16 +; CHECK-NEXT:    [[P:%.*]] = tail call noundef nonnull align 8 dereferenceable(4) ptr @llvm.ptrmask.p0.i64(ptr [[X]], i64 [[SEL1]]) #[[ATTR4]] +; CHECK-NEXT:    store float 1.000000e+00, ptr [[P]], align 8 +; CHECK-NEXT:    ret ptr [[P]] +; +  %sel = select i1 %cmp1, i64 -32, i64 %y +  %sel1 = select i1 %cmp2, i64 %sel, i64 -16 +  %p = tail call ptr @llvm.ptrmask.p0.i64(ptr %x, i64 %sel1) +  store float 1.0, ptr %p, align 8 +  ret ptr %p +} + +define ptr @align_ptrmask_back_const_back_noprop(ptr align 4 %x, i64 %y, i1 %cmp1, i1 %cmp2) { +; CHECK-LABEL: define noundef nonnull align 8 dereferenceable(4) ptr @align_ptrmask_back_const_back_noprop( +; CHECK-SAME: ptr nofree writeonly align 4 [[X:%.*]], i64 [[Y:%.*]], i1 [[CMP1:%.*]], i1 [[CMP2:%.*]]) #[[ATTR0]] { +; CHECK-NEXT:    [[P:%.*]] = tail call noundef nonnull align 8 dereferenceable(4) ptr @llvm.ptrmask.p0.i64(ptr [[X]], i64 noundef -8) #[[ATTR4]] +; CHECK-NEXT:    store float 1.000000e+00, ptr [[P]], align 8 +; CHECK-NEXT:    ret ptr [[P]] 
+; +  %p = tail call ptr @llvm.ptrmask.p0.i64(ptr %x, i64 -8) +  store float 1.0, ptr %p, align 8 +  ret ptr %p +} + +define ptr @align_ptrmask_back_const_back_prop(ptr align 4 %x, i64 %y, i1 %cmp1, i1 %cmp2) { +; CHECK-LABEL: define noundef nonnull align 8 dereferenceable(4) ptr @align_ptrmask_back_const_back_prop( +; CHECK-SAME: ptr nofree writeonly align 8 [[X:%.*]], i64 [[Y:%.*]], i1 [[CMP1:%.*]], i1 [[CMP2:%.*]]) #[[ATTR0]] { +; CHECK-NEXT:    [[P:%.*]] = tail call noundef nonnull align 8 dereferenceable(4) ptr @llvm.ptrmask.p0.i64(ptr [[X]], i64 noundef -2) #[[ATTR4]] +; CHECK-NEXT:    store float 1.000000e+00, ptr [[P]], align 8 +; CHECK-NEXT:    ret ptr [[P]] +; +  %p = tail call ptr @llvm.ptrmask.p0.i64(ptr %x, i64 -2) +  store float 1.0, ptr %p, align 8 +  ret ptr %p +} + +define ptr @align_ptrmask_back_const_forward_mask(ptr align 4 %x, i64 %y, i1 %cmp1, i1 %cmp2) { +; CHECK-LABEL: define align 8 ptr @align_ptrmask_back_const_forward_mask( +; CHECK-SAME: ptr nofree readnone align 4 [[X:%.*]], i64 [[Y:%.*]], i1 [[CMP1:%.*]], i1 [[CMP2:%.*]]) #[[ATTR1]] { +; CHECK-NEXT:    [[P:%.*]] = tail call align 8 ptr @llvm.ptrmask.p0.i64(ptr [[X]], i64 noundef -8) #[[ATTR4]] +; CHECK-NEXT:    ret ptr [[P]] +; +  %p = tail call ptr @llvm.ptrmask.p0.i64(ptr %x, i64 -8) +  ret ptr %p +} + +define ptr @align_ptrmask_back_const_forward_ptr(ptr align 16 %x, i64 %y, i1 %cmp1, i1 %cmp2) { +; CHECK-LABEL: define align 16 ptr @align_ptrmask_back_const_forward_ptr( +; CHECK-SAME: ptr nofree readnone align 16 [[X:%.*]], i64 [[Y:%.*]], i1 [[CMP1:%.*]], i1 [[CMP2:%.*]]) #[[ATTR1]] { +; CHECK-NEXT:    [[P:%.*]] = tail call align 16 ptr @llvm.ptrmask.p0.i64(ptr [[X]], i64 noundef -8) #[[ATTR4]] +; CHECK-NEXT:    ret ptr [[P]] +; +  %p = tail call ptr @llvm.ptrmask.p0.i64(ptr %x, i64 -8) +  ret ptr %p +} + +; FIXME: The store will create AAAlign for %ptr1, +; but the attribute didn't propagate through extractelement, need propagate +define <2 x ptr> @ptrmask_v2p0_v2i64(<2 x ptr> align 2 %ptr, i64 %a) { +; CHECK-LABEL: define <2 x ptr> @ptrmask_v2p0_v2i64( +; CHECK-SAME: <2 x ptr> align 2 [[PTR:%.*]], i64 [[A:%.*]]) #[[ATTR2:[0-9]+]] { +; CHECK-NEXT:    [[RESULT:%.*]] = call <2 x ptr> @llvm.ptrmask.v2p0.v2i64(<2 x ptr> [[PTR]], <2 x i64> noundef splat (i64 -8)) #[[ATTR4]] +; CHECK-NEXT:    [[PTR1:%.*]] = extractelement <2 x ptr> [[RESULT]], i32 0 +; CHECK-NEXT:    [[PTR2:%.*]] = extractelement <2 x ptr> [[RESULT]], i32 1 +; CHECK-NEXT:    store i64 [[A]], ptr [[PTR1]], align 16 +; CHECK-NEXT:    store i64 [[A]], ptr [[PTR2]], align 16 +; CHECK-NEXT:    ret <2 x ptr> [[RESULT]] +; +  %result = call <2 x ptr> @llvm.ptrmask.v2p0.v2i64(<2 x ptr> %ptr, <2 x i64> splat(i64 -8)) +  %ptr1 = extractelement <2 x ptr> %result, i32 0 +  %ptr2 = extractelement <2 x ptr> %result, i32 1 +  store i64 %a, ptr %ptr1, align 16 +  store i64 %a, ptr %ptr2, align 16 +  ret <2 x ptr> %result +} + +define ptr @align_ptrmask_forward_mask_positive(ptr align 4 %x, i64 %y, i1 %cmp1, i1 %cmp2) { +; CHECK-LABEL: define align 4 ptr @align_ptrmask_forward_mask_positive( +; CHECK-SAME: ptr nofree readnone align 4 [[X:%.*]], i64 [[Y:%.*]], i1 [[CMP1:%.*]], i1 [[CMP2:%.*]]) #[[ATTR1]] { +; CHECK-NEXT:    [[P:%.*]] = tail call align 4 ptr @llvm.ptrmask.p0.i64(ptr [[X]], i64 noundef 2) #[[ATTR4]] +; CHECK-NEXT:    ret ptr [[P]] +; +  %p = tail call ptr @llvm.ptrmask.p0.i64(ptr %x, i64 2) +  ret ptr %p +} + +define ptr @align_ptrmask_forward_mask_poison(ptr align 4 %x, i64 %y, i1 %cmp1, i1 %cmp2) { +; CHECK-LABEL: define align 4 ptr 
@align_ptrmask_forward_mask_poison( +; CHECK-SAME: ptr nofree readnone align 4 [[X:%.*]], i64 [[Y:%.*]], i1 [[CMP1:%.*]], i1 [[CMP2:%.*]]) #[[ATTR1]] { +; CHECK-NEXT:    [[P:%.*]] = tail call align 4 ptr @llvm.ptrmask.p0.i64(ptr [[X]], i64 poison) #[[ATTR4]] +; CHECK-NEXT:    ret ptr [[P]] +; +  %p = tail call ptr @llvm.ptrmask.p0.i64(ptr %x, i64 poison) +  ret ptr %p +} + +define ptr @align_ptrmask_forward_mask_max(ptr align 4 %x, i64 %y, i1 %cmp1, i1 %cmp2) { +; CHECK-LABEL: define align 4294967296 ptr @align_ptrmask_forward_mask_max( +; CHECK-SAME: ptr nofree readnone align 4 [[X:%.*]], i64 [[Y:%.*]], i1 [[CMP1:%.*]], i1 [[CMP2:%.*]]) #[[ATTR1]] { +; CHECK-NEXT:    [[P:%.*]] = tail call align 4294967296 ptr @llvm.ptrmask.p0.i64(ptr [[X]], i64 noundef -4294967296) #[[ATTR4]] +; CHECK-NEXT:    ret ptr [[P]] +; +  %p = tail call ptr @llvm.ptrmask.p0.i64(ptr %x, i64 -4294967296) +  ret ptr %p +} + +define ptr @align_ptrmask_forward_mask_max_plus_one(ptr align 4 %x, i64 %y, i1 %cmp1, i1 %cmp2) { +; CHECK-LABEL: define align 4294967296 ptr @align_ptrmask_forward_mask_max_plus_one( +; CHECK-SAME: ptr nofree readnone align 4 [[X:%.*]], i64 [[Y:%.*]], i1 [[CMP1:%.*]], i1 [[CMP2:%.*]]) #[[ATTR1]] { +; CHECK-NEXT:    [[P:%.*]] = tail call align 4294967296 ptr @llvm.ptrmask.p0.i64(ptr [[X]], i64 noundef -8589934592) #[[ATTR4]] +; CHECK-NEXT:    ret ptr [[P]] +; +  %p = tail call ptr @llvm.ptrmask.p0.i64(ptr %x, i64 -8589934592) +  ret ptr %p +} + +define ptr @align_ptrmask_back_callsite(ptr align 4 %x, i64 %y, i1 %cmp1, i1 %cmp2) { +; CHECK-LABEL: define align 16 ptr @align_ptrmask_back_callsite( +; CHECK-SAME: ptr nofree readnone align 16 [[X:%.*]], i64 [[Y:%.*]], i1 [[CMP1:%.*]], i1 [[CMP2:%.*]]) #[[ATTR1]] { +; CHECK-NEXT:    [[P:%.*]] = tail call align 16 ptr @llvm.ptrmask.p0.i64(ptr [[X]], i64 noundef -4) #[[ATTR4]] +; CHECK-NEXT:    ret ptr [[P]] +; +  %p = tail call align 16 ptr @llvm.ptrmask.p0.i64(ptr %x, i64 -4) +  ret ptr %p +} diff --git a/llvm/test/Transforms/OpenMP/parallel_deletion.ll b/llvm/test/Transforms/OpenMP/parallel_deletion.ll index 67970c4..0b6c4f3 100644 --- a/llvm/test/Transforms/OpenMP/parallel_deletion.ll +++ b/llvm/test/Transforms/OpenMP/parallel_deletion.ll @@ -385,7 +385,7 @@ define internal void @.omp_outlined..4(ptr noalias %.global_tid., ptr noalias %.  ; CHECK-LABEL: define {{[^@]+}}@.omp_outlined..4  ; CHECK-SAME: (ptr noalias nofree noundef nonnull readonly align 4 captures(none) dereferenceable(4) [[DOTGLOBAL_TID_:%.*]], ptr noalias nofree readnone captures(none) [[DOTBOUND_TID_:%.*]], ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[A:%.*]]) {  ; CHECK-NEXT:  entry: -; CHECK-NEXT:    [[TMP:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4 +; CHECK-NEXT:    [[TMP:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !invariant.load [[META1:![0-9]+]]  ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_master(ptr noundef nonnull @[[GLOB0]], i32 [[TMP]])  ; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 0  ; CHECK-NEXT:    br i1 [[TMP2]], label [[OMP_IF_END:%.*]], label [[OMP_IF_THEN:%.*]] @@ -458,7 +458,7 @@ define internal void @.omp_outlined..5(ptr noalias %.global_tid., ptr noalias %.  
; CHECK-SAME: (ptr noalias nofree readonly captures(none) [[DOTGLOBAL_TID_:%.*]], ptr noalias nofree readnone captures(none) [[DOTBOUND_TID_:%.*]], ptr nofree noundef nonnull align 4 captures(none) dereferenceable(4) [[A:%.*]]) {  ; CHECK-NEXT:  entry:  ; CHECK-NEXT:    [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr noundef nonnull @[[GLOB0]]) #[[ATTR19]] -; CHECK-NEXT:    [[TMP:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4 +; CHECK-NEXT:    [[TMP:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !invariant.load [[META1]]  ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_single(ptr noundef nonnull @[[GLOB0]], i32 [[TMP]])  ; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 0  ; CHECK-NEXT:    br i1 [[TMP2]], label [[OMP_IF_END:%.*]], label [[OMP_IF_THEN:%.*]] @@ -534,7 +534,7 @@ define internal void @.omp_outlined..6(ptr noalias %.global_tid., ptr noalias %.  ; CHECK-NEXT:    call void @llvm.lifetime.start.p0(ptr noundef nonnull align 4 [[A1]]) #[[ATTR20:[0-9]+]]  ; CHECK-NEXT:    store i32 1, ptr [[A1]], align 4  ; CHECK-NEXT:    store ptr [[A1]], ptr [[DOTOMP_REDUCTION_RED_LIST]], align 8 -; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4 +; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !invariant.load [[META1]]  ; CHECK-NEXT:    [[TMP4:%.*]] = call i32 @__kmpc_reduce_nowait(ptr noundef nonnull @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 noundef 1, i64 noundef 8, ptr noundef nonnull align 8 [[DOTOMP_REDUCTION_RED_LIST]], ptr noundef nonnull @.omp.reduction.reduction_func, ptr noundef nonnull @.gomp_critical_user_.reduction.var)  ; CHECK-NEXT:    switch i32 [[TMP4]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [  ; CHECK-NEXT:      i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] @@ -646,10 +646,10 @@ define internal void @.omp.reduction.reduction_func(ptr %arg, ptr %arg1) {  ; CHECK-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func  ; CHECK-SAME: (ptr nofree noundef nonnull readonly align 8 captures(none) dereferenceable(8) [[ARG:%.*]], ptr nofree noundef nonnull readonly align 8 captures(none) dereferenceable(8) [[ARG1:%.*]]) #[[ATTR10:[0-9]+]] {  ; CHECK-NEXT:  entry: -; CHECK-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[ARG1]], align 8 -; CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[ARG]], align 8 -; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 -; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr [[TMP2]], align 4 +; CHECK-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[ARG1]], align 8, !invariant.load [[META1]] +; CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[ARG]], align 8, !invariant.load [[META1]] +; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !invariant.load [[META1]] +; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr [[TMP2]], align 4, !invariant.load [[META1]]  ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP5]], [[TMP6]]  ; CHECK-NEXT:    store i32 [[ADD]], ptr [[TMP4]], align 4  ; CHECK-NEXT:    ret void diff --git a/llvm/test/Transforms/SimpleLoopUnswitch/partial-unswitch.ll b/llvm/test/Transforms/SimpleLoopUnswitch/partial-unswitch.ll index 1d89420..8716170 100644 --- a/llvm/test/Transforms/SimpleLoopUnswitch/partial-unswitch.ll +++ b/llvm/test/Transforms/SimpleLoopUnswitch/partial-unswitch.ll @@ -1,14 +1,14 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals  ; RUN: opt -passes='loop-mssa(simple-loop-unswitch<nontrivial>),verify<loops>' -S < %s | FileCheck %s  
declare void @clobber() -define i32 @partial_unswitch_true_successor(ptr %ptr, i32 %N) { +define i32 @partial_unswitch_true_successor(ptr %ptr, i32 %N) !prof !0 {  ; CHECK-LABEL: @partial_unswitch_true_successor(  ; CHECK-NEXT:  entry:  ; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[PTR:%.*]], align 4  ; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[TMP0]], 100 -; CHECK-NEXT:    br i1 [[TMP1]], label [[ENTRY_SPLIT_US:%.*]], label [[ENTRY_SPLIT:%.*]] +; CHECK-NEXT:    br i1 [[TMP1]], label [[ENTRY_SPLIT_US:%.*]], label [[ENTRY_SPLIT:%.*]], !prof [[PROF1:![0-9]+]]  ; CHECK:       entry.split.us:  ; CHECK-NEXT:    br label [[LOOP_HEADER_US:%.*]]  ; CHECK:       loop.header.us: @@ -19,7 +19,7 @@ define i32 @partial_unswitch_true_successor(ptr %ptr, i32 %N) {  ; CHECK:       loop.latch.us:  ; CHECK-NEXT:    [[C_US:%.*]] = icmp ult i32 [[IV_US]], [[N:%.*]]  ; CHECK-NEXT:    [[IV_NEXT_US]] = add i32 [[IV_US]], 1 -; CHECK-NEXT:    br i1 [[C_US]], label [[LOOP_HEADER_US]], label [[EXIT_SPLIT_US:%.*]] +; CHECK-NEXT:    br i1 [[C_US]], label [[LOOP_HEADER_US]], label [[EXIT_SPLIT_US:%.*]], !prof [[PROF2:![0-9]+]]  ; CHECK:       exit.split.us:  ; CHECK-NEXT:    br label [[EXIT:%.*]]  ; CHECK:       entry.split: @@ -28,7 +28,7 @@ define i32 @partial_unswitch_true_successor(ptr %ptr, i32 %N) {  ; CHECK-NEXT:    [[IV:%.*]] = phi i32 [ 0, [[ENTRY_SPLIT]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]  ; CHECK-NEXT:    [[LV:%.*]] = load i32, ptr [[PTR]], align 4  ; CHECK-NEXT:    [[SC:%.*]] = icmp eq i32 [[LV]], 100 -; CHECK-NEXT:    br i1 [[SC]], label [[NOCLOBBER:%.*]], label [[CLOBBER:%.*]] +; CHECK-NEXT:    br i1 [[SC]], label [[NOCLOBBER:%.*]], label [[CLOBBER:%.*]], !prof [[PROF1]]  ; CHECK:       noclobber:  ; CHECK-NEXT:    br label [[LOOP_LATCH]]  ; CHECK:       clobber: @@ -37,7 +37,7 @@ define i32 @partial_unswitch_true_successor(ptr %ptr, i32 %N) {  ; CHECK:       loop.latch:  ; CHECK-NEXT:    [[C:%.*]] = icmp ult i32 [[IV]], [[N]]  ; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !prof [[PROF2]], !llvm.loop [[LOOP3:![0-9]+]]  ; CHECK:       exit.split:  ; CHECK-NEXT:    br label [[EXIT]]  ; CHECK:       exit: @@ -50,7 +50,7 @@ loop.header:    %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop.latch ]    %lv = load i32, ptr %ptr    %sc = icmp eq i32 %lv, 100 -  br i1 %sc, label %noclobber, label %clobber +  br i1 %sc, label %noclobber, label %clobber, !prof !1  noclobber:    br label %loop.latch @@ -62,7 +62,7 @@ clobber:  loop.latch:    %c = icmp ult i32 %iv, %N    %iv.next = add i32 %iv, 1 -  br i1 %c, label %loop.header, label %exit +  br i1 %c, label %loop.header, label %exit, !prof !2  exit:    ret i32 10 @@ -102,7 +102,7 @@ define i32 @partial_unswitch_false_successor(ptr %ptr, i32 %N) {  ; CHECK:       loop.latch:  ; CHECK-NEXT:    [[C:%.*]] = icmp ult i32 [[IV]], [[N]]  ; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP2:![0-9]+]] +; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP5:![0-9]+]]  ; CHECK:       exit.split:  ; CHECK-NEXT:    br label [[EXIT]]  ; CHECK:       exit: @@ -171,7 +171,7 @@ define i32 @partial_unswtich_gep_load_icmp(ptr %ptr, i32 %N) {  ; CHECK:       loop.latch:  ; CHECK-NEXT:    [[C:%.*]] = icmp ult i32 [[IV]], [[N]]  ; CHECK-NEXT:    
[[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP6:![0-9]+]]  ; CHECK:       exit.split:  ; CHECK-NEXT:    br label [[EXIT]]  ; CHECK:       exit: @@ -246,7 +246,7 @@ define i32 @partial_unswitch_reduction_phi(ptr %ptr, i32 %N) {  ; CHECK-NEXT:    [[RED_NEXT]] = phi i32 [ [[ADD_5]], [[CLOBBER]] ], [ [[ADD_10]], [[NOCLOBBER]] ]  ; CHECK-NEXT:    [[C:%.*]] = icmp ult i32 [[IV]], [[N]]  ; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP7:![0-9]+]]  ; CHECK:       exit.split:  ; CHECK-NEXT:    [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], [[LOOP_LATCH]] ]  ; CHECK-NEXT:    br label [[EXIT]] @@ -325,7 +325,7 @@ define i32 @partial_unswitch_true_successor_noclobber(ptr noalias %ptr.1, ptr no  ; CHECK:       loop.latch:  ; CHECK-NEXT:    [[C:%.*]] = icmp ult i32 [[IV]], [[N]]  ; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP8:![0-9]+]]  ; CHECK:       exit.split:  ; CHECK-NEXT:    br label [[EXIT]]  ; CHECK:       exit: @@ -637,7 +637,7 @@ define i32 @partial_unswitch_true_successor_preheader_insertion(ptr %ptr, i32 %N  ; CHECK:       loop.latch:  ; CHECK-NEXT:    [[C:%.*]] = icmp ult i32 [[IV]], [[N]]  ; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_LOOPEXIT_SPLIT:%.*]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_LOOPEXIT_SPLIT:%.*]], !llvm.loop [[LOOP9:![0-9]+]]  ; CHECK:       exit.loopexit.split:  ; CHECK-NEXT:    br label [[EXIT_LOOPEXIT]]  ; CHECK:       exit.loopexit: @@ -713,7 +713,7 @@ define i32 @partial_unswitch_true_successor_insert_point(ptr %ptr, i32 %N) {  ; CHECK:       loop.latch:  ; CHECK-NEXT:    [[C:%.*]] = icmp ult i32 [[IV]], [[N]]  ; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP10:![0-9]+]]  ; CHECK:       exit.split:  ; CHECK-NEXT:    br label [[EXIT]]  ; CHECK:       exit: @@ -784,7 +784,7 @@ define i32 @partial_unswitch_true_successor_hoist_invariant(ptr %ptr, i32 %N) {  ; CHECK:       loop.latch:  ; CHECK-NEXT:    [[C:%.*]] = icmp ult i32 [[IV]], [[N]]  ; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP11:![0-9]+]]  ; CHECK:       exit.split:  ; CHECK-NEXT:    br label [[EXIT]]  ; CHECK:       exit: @@ -1073,7 +1073,7 @@ define i32 @partial_unswitch_true_to_latch(ptr %ptr, i32 %N) {  ; CHECK:       loop.latch:  ; CHECK-NEXT:    [[C:%.*]] = icmp ult i32 [[IV]], [[N]]  ; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT:    br i1 [[C]], 
label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP12:![0-9]+]]  ; CHECK:       exit.split:  ; CHECK-NEXT:    br label [[EXIT]]  ; CHECK:       exit: @@ -1138,7 +1138,7 @@ define i32 @partial_unswitch_exiting_block_with_multiple_unswitch_candidates(i32  ; CHECK-NEXT:    store i32 [[TMP1:%.*]], ptr [[PTR]], align 16  ; CHECK-NEXT:    br label [[EXITING]]  ; CHECK:       exiting: -; CHECK-NEXT:    br i1 [[EXIT_COND]], label [[LOOP]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT:    br i1 [[EXIT_COND]], label [[LOOP]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP13:![0-9]+]]  ; CHECK:       exit.split:  ; CHECK-NEXT:    [[RET_VAL:%.*]] = phi i32 [ 1, [[EXITING]] ]  ; CHECK-NEXT:    br label [[EXIT]] @@ -1249,7 +1249,7 @@ define i32 @partial_unswitch_true_successor_for_cost_calculation(ptr %ptr, i32 %  ; CHECK:       loop.latch:  ; CHECK-NEXT:    [[C:%.*]] = icmp ult i32 [[IV]], [[N]]  ; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP14:![0-9]+]]  ; CHECK:       exit.split:  ; CHECK-NEXT:    br label [[EXIT]]  ; CHECK:       exit: @@ -1360,7 +1360,7 @@ define i32 @partial_unswitch_true_successor_trunc(ptr %ptr, i32 %N) {  ; CHECK:       loop.latch:  ; CHECK-NEXT:    [[C:%.*]] = icmp ult i32 [[IV]], [[N]]  ; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP15:![0-9]+]]  ; CHECK:       exit.split:  ; CHECK-NEXT:    br label [[EXIT]]  ; CHECK:       exit: @@ -1425,7 +1425,7 @@ define i32 @partial_unswitch_false_successor_trunc(ptr %ptr, i32 %N) {  ; CHECK:       loop.latch:  ; CHECK-NEXT:    [[C:%.*]] = icmp ult i32 [[IV]], [[N]]  ; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV]], 1 -; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP13:![0-9]+]] +; CHECK-NEXT:    br i1 [[C]], label [[LOOP_HEADER]], label [[EXIT_SPLIT:%.*]], !llvm.loop [[LOOP16:![0-9]+]]  ; CHECK:       exit.split:  ; CHECK-NEXT:    br label [[EXIT]]  ; CHECK:       exit: @@ -1456,15 +1456,26 @@ exit:    ret i32 10  } -; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[UNSWITCH_PARTIAL_DISABLE:![0-9]+]]} -; CHECK: [[UNSWITCH_PARTIAL_DISABLE]] = !{!"llvm.loop.unswitch.partial.disable"} -; CHECK: [[LOOP2]] = distinct !{[[LOOP2]], [[UNSWITCH_PARTIAL_DISABLE]]} -; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[UNSWITCH_PARTIAL_DISABLE]]} -; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[UNSWITCH_PARTIAL_DISABLE]]} -; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[UNSWITCH_PARTIAL_DISABLE]]} -; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[UNSWITCH_PARTIAL_DISABLE]]} -; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[UNSWITCH_PARTIAL_DISABLE]]} -; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[UNSWITCH_PARTIAL_DISABLE]]} -; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[UNSWITCH_PARTIAL_DISABLE]]} -; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[UNSWITCH_PARTIAL_DISABLE]]} -; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[UNSWITCH_PARTIAL_DISABLE]]} +!0 = !{!"function_entry_count", i32 10} +!1 = !{!"branch_weights", i32 1000, i32 1} +!2 = !{!"branch_weights", i32 100, i32 3} + +;. 
+; CHECK: [[META0:![0-9]+]] = !{!"function_entry_count", i32 10} +; CHECK: [[PROF1]] = !{!"branch_weights", i32 1000, i32 1} +; CHECK: [[PROF2]] = !{!"branch_weights", i32 100, i32 3} +; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META4:![0-9]+]]} +; CHECK: [[META4]] = !{!"llvm.loop.unswitch.partial.disable"} +; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META4]]} +; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META4]]} +; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META4]]} +; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META4]]} +; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META4]]} +; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META4]]} +; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META4]]} +; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META4]]} +; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META4]]} +; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META4]]} +; CHECK: [[LOOP15]] = distinct !{[[LOOP15]], [[META4]]} +; CHECK: [[LOOP16]] = distinct !{[[LOOP16]], [[META4]]} +;. diff --git a/llvm/test/Transforms/StructurizeCFG/callbr.ll b/llvm/test/Transforms/StructurizeCFG/callbr.ll new file mode 100644 index 0000000..42f9519 --- /dev/null +++ b/llvm/test/Transforms/StructurizeCFG/callbr.ll @@ -0,0 +1,235 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; RUN: opt -S -passes=structurizecfg %s -o - | FileCheck %s + +; Structurize as usual, but don't tear callbr and its destination blocks apart. +; +; Note: currently, callbr blocks and their corresponding target blocks +; themselves are not handled by the structurizer.* If the CFG turns out to be +; unstructured at the end, the CFG lowering (si-annotate-control-flow) will +; detect this. For the currently intended use cases of callbr in the context of +; the AMDGPU backend, this is not a limitation (cf. +; https://discourse.llvm.org/t/rfc-add-callbr-intrinsic-support/86087). +; +; Note 2: while callbr and its targets remain untouched, everything else is +; handled as usual, even if it is nested in a callbr region. +; +; *FIXME: this will be fixed in the future. Callbr can be handled as follows: +; Input IR: +; ``` +; define void @foo_callbr() { +;   callbr void asm "", "!i"() to label %fallthrough [label %indirect, ...] +; fallthrough: +;   br label %exit +; indirect: +;   br label %exit +; ... +; exit: +;   ret void +; } +; ``` +; +; Output IR: +; ``` +; define void @foo_callbr() { +;   callbr void asm "", "!i"() +;          to label %fallthrough [label %fake.indirect, label %fake.indirect1, label %fake.indirect2, ...] +; fake.indirect:                                    ; preds = %0 +;   br label %Flow +; fake.indirect1:                                   ; preds = %0 +;   br label %Flow +; fake.indirect2:                                   ; preds = %0 +;   br label %Flow +; ... +; Flow:                                             ; preds = %fallthrough, %fake.indirect[0-N] +;   %1 = phi i1 [ false, %fallthrough ], [ true, %fake.indirect ], [ false, %fake.indirect[1-N] ] +;   br i1 %1, label %indirect, label %Flow1 +; Flow1:                                            ; preds = %Flow, %indirect +;   %2 = phi i1 [ false, %Flow], [ true, %fake.indirect1 ], [ false, %indirect ] +;   br i1 %2, label %indirect1, label %Flow2 +; Flow2:                                            ; preds = %Flow, %indirect1 +;   %2 = phi i1 [ false, %Flow], [ true, %fake.indirect2 ], [ false, %indirect1 ] +;   br i1 %2, label %indirect2, label %Flow3 +; ... 
+; fallthrough:                                      ; preds = %0 +;   br label %Flow +; indirect:                                         ; preds = %Flow +;   br label %Flow1 +; indirect1:                                        ; preds = %Flow1 +;   br label %Flow2 +; indirect2:                                        : preds = %Flow2 +;   br label %Flow3 +; ... +; exit:                                             ; preds = %indirectN, %FlowN +;   ret void +; } +; ``` +; +; Output IR as ASCII-art: +;          %0 +; --------------------- +; |     |     |     | +; v     v     v     v +; f    f.i   f.i1  f.i2 +; |     |     |     | +; v     v     v     v +; --------------------- +;        %Flow +;          |   \ +;          |    %indirect +;          |   / +;       %Flow1 +;          |   \ +;          |    %indirect1 +;          |   / +;       %Flow2 +;          |   \ +;          |    %indirect2 +;          |   / +;        %exit +; + +; Only callbr, nothing to do. +define void @callbr_simple() { +; CHECK-LABEL: define void @callbr_simple() { +; CHECK-NEXT:  [[CALLBR:.*:]] +; CHECK-NEXT:    callbr void asm "", "!i"() +; CHECK-NEXT:            to label %[[INDIRECT:.*]] [label %indirect] +; CHECK:       [[INDIRECT]]: +; CHECK-NEXT:    br label %[[EXIT:.*]] +; CHECK:       [[INDIRECT1:.*:]] +; CHECK-NEXT:    br label %[[EXIT]] +; CHECK:       [[EXIT]]: +; CHECK-NEXT:    ret void +; +callbr: +  callbr void asm "", "!i"() to label %fallthrough [label %indirect] +fallthrough: +  br label %exit +indirect: +  br label %exit +exit: +  ret void +} + +; Callbr nested in non-callbr: non-callbr is transformed +define void @callbr_in_non_callbr(i1 %c) { +; CHECK-LABEL: define void @callbr_in_non_callbr( +; CHECK-SAME: i1 [[C:%.*]]) { +; CHECK-NEXT:    [[C_INV:%.*]] = xor i1 [[C]], true +; CHECK-NEXT:    br i1 [[C_INV]], label %[[NOCALLBR:.*]], label %[[FLOW:.*]] +; CHECK:       [[FLOW]]: +; CHECK-NEXT:    [[TMP1:%.*]] = phi i1 [ false, %[[NOCALLBR]] ], [ true, [[TMP0:%.*]] ] +; CHECK-NEXT:    br i1 [[TMP1]], label %[[CALLBR:.*]], label %[[EXIT:.*]] +; CHECK:       [[CALLBR]]: +; CHECK-NEXT:    callbr void asm "", "!i"() +; CHECK-NEXT:            to label %[[INDIRECT:.*]] [label %indirect] +; CHECK:       [[INDIRECT]]: +; CHECK-NEXT:    br label %[[EXIT]] +; CHECK:       [[INDIRECT1:.*:]] +; CHECK-NEXT:    br label %[[EXIT]] +; CHECK:       [[NOCALLBR]]: +; CHECK-NEXT:    br label %[[FLOW]] +; CHECK:       [[EXIT]]: +; CHECK-NEXT:    ret void +; +  br i1 %c, label %callbr, label %nocallbr +callbr: +  callbr void asm "", "!i"() to label %fallthrough [label %indirect] +fallthrough: +  br label %exit +indirect: +  br label %exit +nocallbr: +  br label %exit +exit: +  ret void +} + +; Callbr parent of non-callbr: non-callbr is transformed +define void @non_callbr_in_callbr(i1 %c) { +; CHECK-LABEL: define void @non_callbr_in_callbr( +; CHECK-SAME: i1 [[C:%.*]]) { +; CHECK-NEXT:    [[C_INV:%.*]] = xor i1 [[C]], true +; CHECK-NEXT:    callbr void asm "", "!i"() +; CHECK-NEXT:            to label %[[INDIRECT:.*]] [label %indirect] +; CHECK:       [[INDIRECT]]: +; CHECK-NEXT:    br i1 [[C_INV]], label %[[FALLTHROUGH2:.*]], label %[[FLOW:.*]] +; CHECK:       [[FLOW]]: +; CHECK-NEXT:    [[TMP1:%.*]] = phi i1 [ false, %[[FALLTHROUGH2]] ], [ true, %[[INDIRECT]] ] +; CHECK-NEXT:    br i1 [[TMP1]], label %[[FALLTHROUGH1:.*]], label %[[FLOW1:.*]] +; CHECK:       [[FALLTHROUGH1]]: +; CHECK-NEXT:    br label %[[FLOW1]] +; CHECK:       [[FALLTHROUGH2]]: +; CHECK-NEXT:    br label %[[FLOW]] +; CHECK:       [[INDIRECT1:.*:]] +; 
CHECK-NEXT:    br label %[[EXIT:.*]] +; CHECK:       [[FLOW1]]: +; CHECK-NEXT:    br label %[[EXIT]] +; CHECK:       [[EXIT]]: +; CHECK-NEXT:    ret void +; +  callbr void asm "", "!i"() to label %fallthrough [label %indirect] +fallthrough: +  br i1 %c, label %fallthrough1, label %fallthrough2 +fallthrough1: +  br label %exit +fallthrough2: +  br label %exit +indirect: +  br label %exit +exit: +  ret void +} + +; Callbr surrounded by non-callbr: all three regular branches are handled +; correctly +define void @callbr_nested_in_non_callbr(i1 %c, i1 %d, i1 %e, i1 %f) { +; CHECK-LABEL: define void @callbr_nested_in_non_callbr( +; CHECK-SAME: i1 [[C:%.*]], i1 [[D:%.*]], i1 [[E:%.*]], i1 [[F:%.*]]) { +; CHECK-NEXT:    [[C_INV:%.*]] = xor i1 [[C]], true +; CHECK-NEXT:    br i1 [[C_INV]], label %[[NOCALLBR:.*]], label %[[FLOW3:.*]] +; CHECK:       [[FLOW3]]: +; CHECK-NEXT:    [[TMP1:%.*]] = phi i1 [ false, %[[FLOW:.*]] ], [ true, [[TMP0:%.*]] ] +; CHECK-NEXT:    br i1 [[TMP1]], label %[[CALLBR:.*]], label %[[RET:.*]] +; CHECK:       [[CALLBR]]: +; CHECK-NEXT:    callbr void asm "", "!i"() +; CHECK-NEXT:            to label %[[INDIRECT:.*]] [label %indirect] +; CHECK:       [[INDIRECT]]: +; CHECK-NEXT:    br i1 [[D]], label %[[FALLTHROUGH1:.*]], label %[[FLOW2:.*]] +; CHECK:       [[FALLTHROUGH1]]: +; CHECK-NEXT:    br label %[[FLOW2]] +; CHECK:       [[INDIRECT2:.*:]] +; CHECK-NEXT:    br i1 [[E]], label %[[INDIRECT1:.*]], label %[[FLOW1:.*]] +; CHECK:       [[INDIRECT1]]: +; CHECK-NEXT:    br label %[[FLOW1]] +; CHECK:       [[NOCALLBR]]: +; CHECK-NEXT:    br i1 [[F]], label %[[NOCALLBR1:.*]], label %[[FLOW]] +; CHECK:       [[NOCALLBR1]]: +; CHECK-NEXT:    br label %[[FLOW]] +; CHECK:       [[FLOW]]: +; CHECK-NEXT:    br label %[[FLOW3]] +; CHECK:       [[FLOW1]]: +; CHECK-NEXT:    br label %[[RET]] +; CHECK:       [[FLOW2]]: +; CHECK-NEXT:    br label %[[RET]] +; CHECK:       [[RET]]: +; CHECK-NEXT:    ret void +; +  br i1 %c, label %callbr, label %nocallbr +callbr: +  callbr void asm "", "!i"() to label %fallthrough [label %indirect] +fallthrough: +  br i1 %d, label %fallthrough1, label %ret +fallthrough1: +  br label %ret +indirect: +  br i1 %e, label %indirect1, label %ret +indirect1: +  br label %ret +nocallbr: +  br i1 %f, label %nocallbr1, label %ret +nocallbr1: +  br label %ret +ret: +  ret void +} diff --git a/llvm/test/Transforms/VectorCombine/AMDGPU/extract-insert-chain-to-shuffles.ll b/llvm/test/Transforms/VectorCombine/AMDGPU/extract-insert-chain-to-shuffles.ll new file mode 100644 index 0000000..4b551fa --- /dev/null +++ b/llvm/test/Transforms/VectorCombine/AMDGPU/extract-insert-chain-to-shuffles.ll @@ -0,0 +1,567 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt -S -mtriple=amdgcn-- -mcpu=gfx1100 -passes=vector-combine < %s | FileCheck -check-prefix=OPT %s + +; Generated from amdgpu-promote-alloca on array of vectors +; VectorCombiner should recognize chain of extract-insert vectors +; and turn them into one or two shuffles +define amdgpu_kernel void @extract_insert_chain_to_shuffles(<16 x i8> %in, <16 x i8> %add, ptr addrspace(3) %out) #0 { +; OPT-LABEL: define amdgpu_kernel void @extract_insert_chain_to_shuffles( +; OPT-SAME: <16 x i8> [[IN:%.*]], <16 x i8> [[ADD:%.*]], ptr addrspace(3) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] { +; OPT-NEXT:  [[ENTRY:.*:]] +; OPT-NEXT:    [[ALLOCA:%.*]] = freeze <128 x i8> poison +; OPT-NEXT:    [[TMP0:%.*]] = shufflevector <16 x i8> [[IN]], <16 x i8> poison, <128 x i32> <i32 0, i32 poison, 
i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; OPT-NEXT:    [[TMP1:%.*]] = shufflevector <128 x i8> [[ALLOCA]], <128 x i8> [[TMP0]], <128 x i32> <i32 128, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127> +; OPT-NEXT:    [[TMP2:%.*]] = extractelement <16 x i8> [[IN]], i64 1 +; OPT-NEXT:    [[TMP3:%.*]] = insertelement <128 x i8> [[TMP1]], i8 [[TMP2]], i32 1 +; OPT-NEXT:    [[TMP4:%.*]] = extractelement <16 x i8> [[IN]], i64 2 +; OPT-NEXT:    [[TMP5:%.*]] = insertelement <128 x i8> [[TMP3]], i8 [[TMP4]], i32 2 +; OPT-NEXT:    [[TMP6:%.*]] = extractelement <16 x i8> [[IN]], i64 3 +; OPT-NEXT:    [[TMP7:%.*]] = insertelement <128 x i8> [[TMP5]], i8 [[TMP6]], i32 3 +; OPT-NEXT:    [[TMP8:%.*]] = extractelement <16 x i8> [[IN]], i64 4 +; OPT-NEXT:    [[TMP9:%.*]] = insertelement <128 x i8> [[TMP7]], i8 [[TMP8]], i32 4 +; OPT-NEXT:    [[TMP10:%.*]] = extractelement <16 x i8> [[IN]], i64 5 +; OPT-NEXT:    [[TMP11:%.*]] = insertelement <128 x i8> [[TMP9]], i8 [[TMP10]], i32 5 +; OPT-NEXT:    [[TMP12:%.*]] = extractelement <16 x i8> [[IN]], i64 6 +; OPT-NEXT:    [[TMP13:%.*]] = insertelement 
<128 x i8> [[TMP11]], i8 [[TMP12]], i32 6 +; OPT-NEXT:    [[TMP14:%.*]] = extractelement <16 x i8> [[IN]], i64 7 +; OPT-NEXT:    [[TMP15:%.*]] = insertelement <128 x i8> [[TMP13]], i8 [[TMP14]], i32 7 +; OPT-NEXT:    [[TMP16:%.*]] = extractelement <16 x i8> [[IN]], i64 8 +; OPT-NEXT:    [[TMP17:%.*]] = insertelement <128 x i8> [[TMP15]], i8 [[TMP16]], i32 8 +; OPT-NEXT:    [[TMP18:%.*]] = extractelement <16 x i8> [[IN]], i64 9 +; OPT-NEXT:    [[TMP19:%.*]] = insertelement <128 x i8> [[TMP17]], i8 [[TMP18]], i32 9 +; OPT-NEXT:    [[TMP20:%.*]] = extractelement <16 x i8> [[IN]], i64 10 +; OPT-NEXT:    [[TMP21:%.*]] = insertelement <128 x i8> [[TMP19]], i8 [[TMP20]], i32 10 +; OPT-NEXT:    [[TMP22:%.*]] = extractelement <16 x i8> [[IN]], i64 11 +; OPT-NEXT:    [[TMP23:%.*]] = insertelement <128 x i8> [[TMP21]], i8 [[TMP22]], i32 11 +; OPT-NEXT:    [[TMP24:%.*]] = extractelement <16 x i8> [[IN]], i64 12 +; OPT-NEXT:    [[TMP25:%.*]] = insertelement <128 x i8> [[TMP23]], i8 [[TMP24]], i32 12 +; OPT-NEXT:    [[TMP26:%.*]] = extractelement <16 x i8> [[IN]], i64 13 +; OPT-NEXT:    [[TMP27:%.*]] = insertelement <128 x i8> [[TMP25]], i8 [[TMP26]], i32 13 +; OPT-NEXT:    [[TMP28:%.*]] = extractelement <16 x i8> [[IN]], i64 14 +; OPT-NEXT:    [[TMP29:%.*]] = insertelement <128 x i8> [[TMP27]], i8 [[TMP28]], i32 14 +; OPT-NEXT:    [[TMP30:%.*]] = extractelement <16 x i8> [[IN]], i64 15 +; OPT-NEXT:    [[TMP31:%.*]] = insertelement <128 x i8> [[TMP29]], i8 [[TMP30]], i32 15 +; OPT-NEXT:    [[TMP32:%.*]] = shufflevector <16 x i8> [[IN]], <16 x i8> poison, <128 x i32> <i32 0, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; OPT-NEXT:    [[TMP33:%.*]] = shufflevector <128 x i8> [[TMP31]], <128 x i8> [[TMP32]], <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 128, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 
43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127> +; OPT-NEXT:    [[TMP34:%.*]] = extractelement <16 x i8> [[IN]], i64 1 +; OPT-NEXT:    [[TMP35:%.*]] = insertelement <128 x i8> [[TMP33]], i8 [[TMP34]], i32 17 +; OPT-NEXT:    [[TMP36:%.*]] = extractelement <16 x i8> [[IN]], i64 2 +; OPT-NEXT:    [[TMP37:%.*]] = insertelement <128 x i8> [[TMP35]], i8 [[TMP36]], i32 18 +; OPT-NEXT:    [[TMP38:%.*]] = extractelement <16 x i8> [[IN]], i64 3 +; OPT-NEXT:    [[TMP39:%.*]] = insertelement <128 x i8> [[TMP37]], i8 [[TMP38]], i32 19 +; OPT-NEXT:    [[TMP40:%.*]] = extractelement <16 x i8> [[IN]], i64 4 +; OPT-NEXT:    [[TMP41:%.*]] = insertelement <128 x i8> [[TMP39]], i8 [[TMP40]], i32 20 +; OPT-NEXT:    [[TMP42:%.*]] = extractelement <16 x i8> [[IN]], i64 5 +; OPT-NEXT:    [[TMP43:%.*]] = insertelement <128 x i8> [[TMP41]], i8 [[TMP42]], i32 21 +; OPT-NEXT:    [[TMP44:%.*]] = extractelement <16 x i8> [[IN]], i64 6 +; OPT-NEXT:    [[TMP45:%.*]] = insertelement <128 x i8> [[TMP43]], i8 [[TMP44]], i32 22 +; OPT-NEXT:    [[TMP46:%.*]] = extractelement <16 x i8> [[IN]], i64 7 +; OPT-NEXT:    [[TMP47:%.*]] = insertelement <128 x i8> [[TMP45]], i8 [[TMP46]], i32 23 +; OPT-NEXT:    [[TMP48:%.*]] = extractelement <16 x i8> [[IN]], i64 8 +; OPT-NEXT:    [[TMP49:%.*]] = insertelement <128 x i8> [[TMP47]], i8 [[TMP48]], i32 24 +; OPT-NEXT:    [[TMP50:%.*]] = extractelement <16 x i8> [[IN]], i64 9 +; OPT-NEXT:    [[TMP51:%.*]] = insertelement <128 x i8> [[TMP49]], i8 [[TMP50]], i32 25 +; OPT-NEXT:    [[TMP52:%.*]] = extractelement <16 x i8> [[IN]], i64 10 +; OPT-NEXT:    [[TMP53:%.*]] = insertelement <128 x i8> [[TMP51]], i8 [[TMP52]], i32 26 +; OPT-NEXT:    [[TMP54:%.*]] = extractelement <16 x i8> [[IN]], i64 11 +; OPT-NEXT:    [[TMP55:%.*]] = insertelement <128 x i8> [[TMP53]], i8 [[TMP54]], i32 27 +; OPT-NEXT:    [[TMP56:%.*]] = extractelement <16 x i8> [[IN]], i64 12 +; OPT-NEXT:    [[TMP57:%.*]] = insertelement <128 x i8> [[TMP55]], i8 [[TMP56]], i32 28 +; OPT-NEXT:    [[TMP58:%.*]] = extractelement <16 x i8> [[IN]], i64 13 +; OPT-NEXT:    [[TMP59:%.*]] = insertelement <128 x i8> [[TMP57]], i8 [[TMP58]], i32 29 +; OPT-NEXT:    [[TMP60:%.*]] = extractelement <16 x i8> [[IN]], i64 14 +; OPT-NEXT:    [[TMP61:%.*]] = insertelement <128 x i8> [[TMP59]], i8 [[TMP60]], i32 30 +; OPT-NEXT:    [[TMP62:%.*]] = extractelement <16 x i8> [[IN]], i64 15 +; OPT-NEXT:    [[TMP63:%.*]] = insertelement <128 x i8> [[TMP61]], i8 [[TMP62]], i32 31 +; OPT-NEXT:    [[TMP64:%.*]] = shufflevector <16 x i8> [[IN]], <16 x i8> poison, <128 x i32> <i32 0, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 
poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; OPT-NEXT:    [[TMP65:%.*]] = shufflevector <128 x i8> [[TMP63]], <128 x i8> [[TMP64]], <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 128, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127> +; OPT-NEXT:    [[TMP66:%.*]] = extractelement <16 x i8> [[IN]], i64 1 +; OPT-NEXT:    [[TMP67:%.*]] = insertelement <128 x i8> [[TMP65]], i8 [[TMP66]], i32 33 +; OPT-NEXT:    [[TMP68:%.*]] = extractelement <16 x i8> [[IN]], i64 2 +; OPT-NEXT:    [[TMP69:%.*]] = insertelement <128 x i8> [[TMP67]], i8 [[TMP68]], i32 34 +; OPT-NEXT:    [[TMP70:%.*]] = extractelement <16 x i8> [[IN]], i64 3 +; OPT-NEXT:    [[TMP71:%.*]] = insertelement <128 x i8> [[TMP69]], i8 [[TMP70]], i32 35 +; OPT-NEXT:    [[TMP72:%.*]] = extractelement <16 x i8> [[IN]], i64 4 +; OPT-NEXT:    [[TMP73:%.*]] = insertelement <128 x i8> [[TMP71]], i8 [[TMP72]], i32 36 +; OPT-NEXT:    [[TMP74:%.*]] = extractelement <16 x i8> [[IN]], i64 5 +; OPT-NEXT:    [[TMP75:%.*]] = insertelement <128 x i8> [[TMP73]], i8 [[TMP74]], i32 37 +; OPT-NEXT:    [[TMP76:%.*]] = extractelement <16 x i8> [[IN]], i64 6 +; OPT-NEXT:    [[TMP77:%.*]] = insertelement <128 x i8> [[TMP75]], i8 [[TMP76]], i32 38 +; OPT-NEXT:    [[TMP78:%.*]] = extractelement <16 x i8> [[IN]], i64 7 +; OPT-NEXT:    [[TMP79:%.*]] = insertelement <128 x i8> [[TMP77]], i8 [[TMP78]], i32 39 +; OPT-NEXT:    [[TMP80:%.*]] = extractelement <16 x i8> [[IN]], i64 8 +; OPT-NEXT:    [[TMP81:%.*]] = 
insertelement <128 x i8> [[TMP79]], i8 [[TMP80]], i32 40 +; OPT-NEXT:    [[TMP82:%.*]] = extractelement <16 x i8> [[IN]], i64 9 +; OPT-NEXT:    [[TMP83:%.*]] = insertelement <128 x i8> [[TMP81]], i8 [[TMP82]], i32 41 +; OPT-NEXT:    [[TMP84:%.*]] = extractelement <16 x i8> [[IN]], i64 10 +; OPT-NEXT:    [[TMP85:%.*]] = insertelement <128 x i8> [[TMP83]], i8 [[TMP84]], i32 42 +; OPT-NEXT:    [[TMP86:%.*]] = extractelement <16 x i8> [[IN]], i64 11 +; OPT-NEXT:    [[TMP87:%.*]] = insertelement <128 x i8> [[TMP85]], i8 [[TMP86]], i32 43 +; OPT-NEXT:    [[TMP88:%.*]] = extractelement <16 x i8> [[IN]], i64 12 +; OPT-NEXT:    [[TMP89:%.*]] = insertelement <128 x i8> [[TMP87]], i8 [[TMP88]], i32 44 +; OPT-NEXT:    [[TMP90:%.*]] = extractelement <16 x i8> [[IN]], i64 13 +; OPT-NEXT:    [[TMP91:%.*]] = insertelement <128 x i8> [[TMP89]], i8 [[TMP90]], i32 45 +; OPT-NEXT:    [[TMP92:%.*]] = extractelement <16 x i8> [[IN]], i64 14 +; OPT-NEXT:    [[TMP93:%.*]] = insertelement <128 x i8> [[TMP91]], i8 [[TMP92]], i32 46 +; OPT-NEXT:    [[TMP94:%.*]] = extractelement <16 x i8> [[IN]], i64 15 +; OPT-NEXT:    [[TMP95:%.*]] = insertelement <128 x i8> [[TMP93]], i8 [[TMP94]], i32 47 +; OPT-NEXT:    [[TMP96:%.*]] = shufflevector <16 x i8> [[IN]], <16 x i8> poison, <128 x i32> <i32 0, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; OPT-NEXT:    [[TMP97:%.*]] = shufflevector <128 x i8> [[TMP95]], <128 x i8> [[TMP96]], <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 128, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 
81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127> +; OPT-NEXT:    [[TMP98:%.*]] = extractelement <16 x i8> [[IN]], i64 1 +; OPT-NEXT:    [[TMP99:%.*]] = insertelement <128 x i8> [[TMP97]], i8 [[TMP98]], i32 49 +; OPT-NEXT:    [[TMP100:%.*]] = extractelement <16 x i8> [[IN]], i64 2 +; OPT-NEXT:    [[TMP101:%.*]] = insertelement <128 x i8> [[TMP99]], i8 [[TMP100]], i32 50 +; OPT-NEXT:    [[TMP102:%.*]] = extractelement <16 x i8> [[IN]], i64 3 +; OPT-NEXT:    [[TMP103:%.*]] = insertelement <128 x i8> [[TMP101]], i8 [[TMP102]], i32 51 +; OPT-NEXT:    [[TMP104:%.*]] = extractelement <16 x i8> [[IN]], i64 4 +; OPT-NEXT:    [[TMP105:%.*]] = insertelement <128 x i8> [[TMP103]], i8 [[TMP104]], i32 52 +; OPT-NEXT:    [[TMP106:%.*]] = extractelement <16 x i8> [[IN]], i64 5 +; OPT-NEXT:    [[TMP107:%.*]] = insertelement <128 x i8> [[TMP105]], i8 [[TMP106]], i32 53 +; OPT-NEXT:    [[TMP108:%.*]] = extractelement <16 x i8> [[IN]], i64 6 +; OPT-NEXT:    [[TMP109:%.*]] = insertelement <128 x i8> [[TMP107]], i8 [[TMP108]], i32 54 +; OPT-NEXT:    [[TMP110:%.*]] = extractelement <16 x i8> [[IN]], i64 7 +; OPT-NEXT:    [[TMP111:%.*]] = insertelement <128 x i8> [[TMP109]], i8 [[TMP110]], i32 55 +; OPT-NEXT:    [[TMP112:%.*]] = extractelement <16 x i8> [[IN]], i64 8 +; OPT-NEXT:    [[TMP113:%.*]] = insertelement <128 x i8> [[TMP111]], i8 [[TMP112]], i32 56 +; OPT-NEXT:    [[TMP114:%.*]] = extractelement <16 x i8> [[IN]], i64 9 +; OPT-NEXT:    [[TMP115:%.*]] = insertelement <128 x i8> [[TMP113]], i8 [[TMP114]], i32 57 +; OPT-NEXT:    [[TMP116:%.*]] = extractelement <16 x i8> [[IN]], i64 10 +; OPT-NEXT:    [[TMP117:%.*]] = insertelement <128 x i8> [[TMP115]], i8 [[TMP116]], i32 58 +; OPT-NEXT:    [[TMP118:%.*]] = extractelement <16 x i8> [[IN]], i64 11 +; OPT-NEXT:    [[TMP119:%.*]] = insertelement <128 x i8> [[TMP117]], i8 [[TMP118]], i32 59 +; OPT-NEXT:    [[TMP120:%.*]] = extractelement <16 x i8> [[IN]], i64 12 +; OPT-NEXT:    [[TMP121:%.*]] = insertelement <128 x i8> [[TMP119]], i8 [[TMP120]], i32 60 +; OPT-NEXT:    [[TMP122:%.*]] = extractelement <16 x i8> [[IN]], i64 13 +; OPT-NEXT:    [[TMP123:%.*]] = insertelement <128 x i8> [[TMP121]], i8 [[TMP122]], i32 61 +; OPT-NEXT:    [[TMP124:%.*]] = extractelement <16 x i8> [[IN]], i64 14 +; OPT-NEXT:    [[TMP125:%.*]] = insertelement <128 x i8> [[TMP123]], i8 [[TMP124]], i32 62 +; OPT-NEXT:    [[TMP126:%.*]] = extractelement <16 x i8> [[IN]], i64 15 +; OPT-NEXT:    [[TMP127:%.*]] = insertelement <128 x i8> [[TMP125]], i8 [[TMP126]], i32 63 +; OPT-NEXT:    [[TMP128:%.*]] = shufflevector <16 x i8> [[IN]], <16 x i8> poison, <128 x i32> <i32 0, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, 
i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; OPT-NEXT:    [[TMP129:%.*]] = shufflevector <128 x i8> [[TMP127]], <128 x i8> [[TMP128]], <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 128, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127> +; OPT-NEXT:    [[TMP130:%.*]] = extractelement <16 x i8> [[IN]], i64 1 +; OPT-NEXT:    [[TMP131:%.*]] = insertelement <128 x i8> [[TMP129]], i8 [[TMP130]], i32 65 +; OPT-NEXT:    [[TMP132:%.*]] = extractelement <16 x i8> [[IN]], i64 2 +; OPT-NEXT:    [[TMP133:%.*]] = insertelement <128 x i8> [[TMP131]], i8 [[TMP132]], i32 66 +; OPT-NEXT:    [[TMP134:%.*]] = extractelement <16 x i8> [[IN]], i64 3 +; OPT-NEXT:    [[TMP135:%.*]] = insertelement <128 x i8> [[TMP133]], i8 [[TMP134]], i32 67 +; OPT-NEXT:    [[TMP136:%.*]] = extractelement <16 x i8> [[IN]], i64 4 +; OPT-NEXT:    [[TMP137:%.*]] = insertelement <128 x i8> [[TMP135]], i8 [[TMP136]], i32 68 +; OPT-NEXT:    [[TMP138:%.*]] = extractelement <16 x i8> [[IN]], i64 5 +; OPT-NEXT:    [[TMP139:%.*]] = insertelement <128 x i8> [[TMP137]], i8 [[TMP138]], i32 69 +; OPT-NEXT:    [[TMP140:%.*]] = extractelement <16 x i8> [[IN]], i64 6 +; OPT-NEXT:    [[TMP141:%.*]] = insertelement <128 x i8> [[TMP139]], i8 [[TMP140]], i32 70 +; OPT-NEXT:    [[TMP142:%.*]] = extractelement <16 x i8> [[IN]], i64 7 +; OPT-NEXT:    [[TMP143:%.*]] = insertelement <128 x i8> [[TMP141]], i8 [[TMP142]], i32 71 +; OPT-NEXT:    [[TMP144:%.*]] = extractelement <16 x i8> [[IN]], i64 8 +; OPT-NEXT:    [[TMP145:%.*]] = insertelement <128 x i8> [[TMP143]], i8 [[TMP144]], i32 72 +; OPT-NEXT:    [[TMP146:%.*]] = extractelement <16 x i8> [[IN]], i64 9 +; OPT-NEXT:    [[TMP147:%.*]] = insertelement <128 x i8> [[TMP145]], i8 [[TMP146]], 
i32 73 +; OPT-NEXT:    [[TMP148:%.*]] = extractelement <16 x i8> [[IN]], i64 10 +; OPT-NEXT:    [[TMP149:%.*]] = insertelement <128 x i8> [[TMP147]], i8 [[TMP148]], i32 74 +; OPT-NEXT:    [[TMP150:%.*]] = extractelement <16 x i8> [[IN]], i64 11 +; OPT-NEXT:    [[TMP151:%.*]] = insertelement <128 x i8> [[TMP149]], i8 [[TMP150]], i32 75 +; OPT-NEXT:    [[TMP152:%.*]] = extractelement <16 x i8> [[IN]], i64 12 +; OPT-NEXT:    [[TMP153:%.*]] = insertelement <128 x i8> [[TMP151]], i8 [[TMP152]], i32 76 +; OPT-NEXT:    [[TMP154:%.*]] = extractelement <16 x i8> [[IN]], i64 13 +; OPT-NEXT:    [[TMP155:%.*]] = insertelement <128 x i8> [[TMP153]], i8 [[TMP154]], i32 77 +; OPT-NEXT:    [[TMP156:%.*]] = extractelement <16 x i8> [[IN]], i64 14 +; OPT-NEXT:    [[TMP157:%.*]] = insertelement <128 x i8> [[TMP155]], i8 [[TMP156]], i32 78 +; OPT-NEXT:    [[TMP158:%.*]] = extractelement <16 x i8> [[IN]], i64 15 +; OPT-NEXT:    [[TMP159:%.*]] = insertelement <128 x i8> [[TMP157]], i8 [[TMP158]], i32 79 +; OPT-NEXT:    [[TMP160:%.*]] = shufflevector <16 x i8> [[IN]], <16 x i8> poison, <128 x i32> <i32 0, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; OPT-NEXT:    [[TMP161:%.*]] = shufflevector <128 x i8> [[TMP159]], <128 x i8> [[TMP160]], <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 128, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 
103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127> +; OPT-NEXT:    [[TMP162:%.*]] = extractelement <16 x i8> [[IN]], i64 1 +; OPT-NEXT:    [[TMP163:%.*]] = insertelement <128 x i8> [[TMP161]], i8 [[TMP162]], i32 81 +; OPT-NEXT:    [[TMP164:%.*]] = extractelement <16 x i8> [[IN]], i64 2 +; OPT-NEXT:    [[TMP165:%.*]] = insertelement <128 x i8> [[TMP163]], i8 [[TMP164]], i32 82 +; OPT-NEXT:    [[TMP166:%.*]] = extractelement <16 x i8> [[IN]], i64 3 +; OPT-NEXT:    [[TMP167:%.*]] = insertelement <128 x i8> [[TMP165]], i8 [[TMP166]], i32 83 +; OPT-NEXT:    [[TMP168:%.*]] = extractelement <16 x i8> [[IN]], i64 4 +; OPT-NEXT:    [[TMP169:%.*]] = insertelement <128 x i8> [[TMP167]], i8 [[TMP168]], i32 84 +; OPT-NEXT:    [[TMP170:%.*]] = extractelement <16 x i8> [[IN]], i64 5 +; OPT-NEXT:    [[TMP171:%.*]] = insertelement <128 x i8> [[TMP169]], i8 [[TMP170]], i32 85 +; OPT-NEXT:    [[TMP172:%.*]] = extractelement <16 x i8> [[IN]], i64 6 +; OPT-NEXT:    [[TMP173:%.*]] = insertelement <128 x i8> [[TMP171]], i8 [[TMP172]], i32 86 +; OPT-NEXT:    [[TMP174:%.*]] = extractelement <16 x i8> [[IN]], i64 7 +; OPT-NEXT:    [[TMP175:%.*]] = insertelement <128 x i8> [[TMP173]], i8 [[TMP174]], i32 87 +; OPT-NEXT:    [[TMP176:%.*]] = extractelement <16 x i8> [[IN]], i64 8 +; OPT-NEXT:    [[TMP177:%.*]] = insertelement <128 x i8> [[TMP175]], i8 [[TMP176]], i32 88 +; OPT-NEXT:    [[TMP178:%.*]] = extractelement <16 x i8> [[IN]], i64 9 +; OPT-NEXT:    [[TMP179:%.*]] = insertelement <128 x i8> [[TMP177]], i8 [[TMP178]], i32 89 +; OPT-NEXT:    [[TMP180:%.*]] = extractelement <16 x i8> [[IN]], i64 10 +; OPT-NEXT:    [[TMP181:%.*]] = insertelement <128 x i8> [[TMP179]], i8 [[TMP180]], i32 90 +; OPT-NEXT:    [[TMP182:%.*]] = extractelement <16 x i8> [[IN]], i64 11 +; OPT-NEXT:    [[TMP183:%.*]] = insertelement <128 x i8> [[TMP181]], i8 [[TMP182]], i32 91 +; OPT-NEXT:    [[TMP184:%.*]] = extractelement <16 x i8> [[IN]], i64 12 +; OPT-NEXT:    [[TMP185:%.*]] = insertelement <128 x i8> [[TMP183]], i8 [[TMP184]], i32 92 +; OPT-NEXT:    [[TMP186:%.*]] = extractelement <16 x i8> [[IN]], i64 13 +; OPT-NEXT:    [[TMP187:%.*]] = insertelement <128 x i8> [[TMP185]], i8 [[TMP186]], i32 93 +; OPT-NEXT:    [[TMP188:%.*]] = extractelement <16 x i8> [[IN]], i64 14 +; OPT-NEXT:    [[TMP189:%.*]] = insertelement <128 x i8> [[TMP187]], i8 [[TMP188]], i32 94 +; OPT-NEXT:    [[TMP190:%.*]] = extractelement <16 x i8> [[IN]], i64 15 +; OPT-NEXT:    [[TMP191:%.*]] = insertelement <128 x i8> [[TMP189]], i8 [[TMP190]], i32 95 +; OPT-NEXT:    [[TMP192:%.*]] = shufflevector <16 x i8> [[IN]], <16 x i8> poison, <128 x i32> <i32 0, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 
poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; OPT-NEXT:    [[TMP193:%.*]] = shufflevector <128 x i8> [[TMP191]], <128 x i8> [[TMP192]], <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 128, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127> +; OPT-NEXT:    [[TMP194:%.*]] = extractelement <16 x i8> [[IN]], i64 1 +; OPT-NEXT:    [[TMP195:%.*]] = insertelement <128 x i8> [[TMP193]], i8 [[TMP194]], i32 97 +; OPT-NEXT:    [[TMP196:%.*]] = extractelement <16 x i8> [[IN]], i64 2 +; OPT-NEXT:    [[TMP197:%.*]] = insertelement <128 x i8> [[TMP195]], i8 [[TMP196]], i32 98 +; OPT-NEXT:    [[TMP198:%.*]] = extractelement <16 x i8> [[IN]], i64 3 +; OPT-NEXT:    [[TMP199:%.*]] = insertelement <128 x i8> [[TMP197]], i8 [[TMP198]], i32 99 +; OPT-NEXT:    [[TMP200:%.*]] = extractelement <16 x i8> [[IN]], i64 4 +; OPT-NEXT:    [[TMP201:%.*]] = insertelement <128 x i8> [[TMP199]], i8 [[TMP200]], i32 100 +; OPT-NEXT:    [[TMP202:%.*]] = extractelement <16 x i8> [[IN]], i64 5 +; OPT-NEXT:    [[TMP203:%.*]] = insertelement <128 x i8> [[TMP201]], i8 [[TMP202]], i32 101 +; OPT-NEXT:    [[TMP204:%.*]] = extractelement <16 x i8> [[IN]], i64 6 +; OPT-NEXT:    [[TMP205:%.*]] = insertelement <128 x i8> [[TMP203]], i8 [[TMP204]], i32 102 +; OPT-NEXT:    [[TMP206:%.*]] = extractelement <16 x i8> [[IN]], i64 7 +; OPT-NEXT:    [[TMP207:%.*]] = insertelement <128 x i8> [[TMP205]], i8 [[TMP206]], i32 103 +; OPT-NEXT:    [[TMP208:%.*]] = extractelement <16 x i8> [[IN]], i64 8 +; OPT-NEXT:    [[TMP209:%.*]] = insertelement <128 x i8> [[TMP207]], i8 [[TMP208]], i32 104 +; OPT-NEXT:    [[TMP210:%.*]] = extractelement <16 x i8> [[IN]], i64 9 +; OPT-NEXT:    [[TMP211:%.*]] = insertelement <128 x i8> [[TMP209]], i8 [[TMP210]], i32 105 +; OPT-NEXT:    [[TMP212:%.*]] = extractelement <16 x i8> [[IN]], i64 10 +; OPT-NEXT:    [[TMP213:%.*]] = insertelement <128 x i8> [[TMP211]], i8 [[TMP212]], 
i32 106 +; OPT-NEXT:    [[TMP214:%.*]] = extractelement <16 x i8> [[IN]], i64 11 +; OPT-NEXT:    [[TMP215:%.*]] = insertelement <128 x i8> [[TMP213]], i8 [[TMP214]], i32 107 +; OPT-NEXT:    [[TMP216:%.*]] = extractelement <16 x i8> [[IN]], i64 12 +; OPT-NEXT:    [[TMP217:%.*]] = insertelement <128 x i8> [[TMP215]], i8 [[TMP216]], i32 108 +; OPT-NEXT:    [[TMP218:%.*]] = extractelement <16 x i8> [[IN]], i64 13 +; OPT-NEXT:    [[TMP219:%.*]] = insertelement <128 x i8> [[TMP217]], i8 [[TMP218]], i32 109 +; OPT-NEXT:    [[TMP220:%.*]] = extractelement <16 x i8> [[IN]], i64 14 +; OPT-NEXT:    [[TMP221:%.*]] = insertelement <128 x i8> [[TMP219]], i8 [[TMP220]], i32 110 +; OPT-NEXT:    [[TMP222:%.*]] = extractelement <16 x i8> [[IN]], i64 15 +; OPT-NEXT:    [[TMP223:%.*]] = insertelement <128 x i8> [[TMP221]], i8 [[TMP222]], i32 111 +; OPT-NEXT:    [[TMP224:%.*]] = shufflevector <16 x i8> [[IN]], <16 x i8> poison, <128 x i32> <i32 0, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; OPT-NEXT:    [[TMP225:%.*]] = shufflevector <128 x i8> [[TMP223]], <128 x i8> [[TMP224]], <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 128, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 
121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127> +; OPT-NEXT:    [[TMP226:%.*]] = extractelement <16 x i8> [[IN]], i64 1 +; OPT-NEXT:    [[TMP227:%.*]] = insertelement <128 x i8> [[TMP225]], i8 [[TMP226]], i32 113 +; OPT-NEXT:    [[TMP228:%.*]] = extractelement <16 x i8> [[IN]], i64 2 +; OPT-NEXT:    [[TMP229:%.*]] = insertelement <128 x i8> [[TMP227]], i8 [[TMP228]], i32 114 +; OPT-NEXT:    [[TMP230:%.*]] = extractelement <16 x i8> [[IN]], i64 3 +; OPT-NEXT:    [[TMP231:%.*]] = insertelement <128 x i8> [[TMP229]], i8 [[TMP230]], i32 115 +; OPT-NEXT:    [[TMP232:%.*]] = extractelement <16 x i8> [[IN]], i64 4 +; OPT-NEXT:    [[TMP233:%.*]] = insertelement <128 x i8> [[TMP231]], i8 [[TMP232]], i32 116 +; OPT-NEXT:    [[TMP234:%.*]] = extractelement <16 x i8> [[IN]], i64 5 +; OPT-NEXT:    [[TMP235:%.*]] = insertelement <128 x i8> [[TMP233]], i8 [[TMP234]], i32 117 +; OPT-NEXT:    [[TMP236:%.*]] = extractelement <16 x i8> [[IN]], i64 6 +; OPT-NEXT:    [[TMP237:%.*]] = insertelement <128 x i8> [[TMP235]], i8 [[TMP236]], i32 118 +; OPT-NEXT:    [[TMP238:%.*]] = extractelement <16 x i8> [[IN]], i64 7 +; OPT-NEXT:    [[TMP239:%.*]] = insertelement <128 x i8> [[TMP237]], i8 [[TMP238]], i32 119 +; OPT-NEXT:    [[TMP240:%.*]] = extractelement <16 x i8> [[IN]], i64 8 +; OPT-NEXT:    [[TMP241:%.*]] = insertelement <128 x i8> [[TMP239]], i8 [[TMP240]], i32 120 +; OPT-NEXT:    [[TMP242:%.*]] = extractelement <16 x i8> [[IN]], i64 9 +; OPT-NEXT:    [[TMP243:%.*]] = insertelement <128 x i8> [[TMP241]], i8 [[TMP242]], i32 121 +; OPT-NEXT:    [[TMP244:%.*]] = extractelement <16 x i8> [[IN]], i64 10 +; OPT-NEXT:    [[TMP245:%.*]] = insertelement <128 x i8> [[TMP243]], i8 [[TMP244]], i32 122 +; OPT-NEXT:    [[TMP246:%.*]] = extractelement <16 x i8> [[IN]], i64 11 +; OPT-NEXT:    [[TMP247:%.*]] = insertelement <128 x i8> [[TMP245]], i8 [[TMP246]], i32 123 +; OPT-NEXT:    [[TMP248:%.*]] = extractelement <16 x i8> [[IN]], i64 12 +; OPT-NEXT:    [[TMP249:%.*]] = insertelement <128 x i8> [[TMP247]], i8 [[TMP248]], i32 124 +; OPT-NEXT:    [[TMP250:%.*]] = extractelement <16 x i8> [[IN]], i64 13 +; OPT-NEXT:    [[TMP251:%.*]] = insertelement <128 x i8> [[TMP249]], i8 [[TMP250]], i32 125 +; OPT-NEXT:    [[TMP252:%.*]] = extractelement <16 x i8> [[IN]], i64 14 +; OPT-NEXT:    [[TMP253:%.*]] = insertelement <128 x i8> [[TMP251]], i8 [[TMP252]], i32 126 +; OPT-NEXT:    [[TMP254:%.*]] = extractelement <16 x i8> [[IN]], i64 15 +; OPT-NEXT:    [[TMP255:%.*]] = insertelement <128 x i8> [[TMP253]], i8 [[TMP254]], i32 127 +; OPT-NEXT:    [[TMP256:%.*]] = shufflevector <16 x i8> [[IN]], <16 x i8> poison, <16 x i32> <i32 0, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison> +; OPT-NEXT:    [[TMP257:%.*]] = insertelement <16 x i8> [[TMP256]], i8 [[TMP162]], i64 1 +; OPT-NEXT:    [[TMP258:%.*]] = insertelement <16 x i8> [[TMP257]], i8 [[TMP164]], i64 2 +; OPT-NEXT:    [[TMP259:%.*]] = insertelement <16 x i8> [[TMP258]], i8 [[TMP166]], i64 3 +; OPT-NEXT:    [[TMP260:%.*]] = insertelement <16 x i8> [[TMP259]], i8 [[TMP168]], i64 4 +; OPT-NEXT:    [[TMP261:%.*]] = insertelement <16 x i8> [[TMP260]], i8 [[TMP170]], i64 5 +; OPT-NEXT:    [[TMP262:%.*]] = insertelement <16 x i8> [[TMP261]], i8 [[TMP172]], i64 6 +; OPT-NEXT:    [[TMP263:%.*]] = insertelement <16 x i8> [[TMP262]], i8 [[TMP174]], i64 7 +; OPT-NEXT:    [[TMP264:%.*]] = insertelement <16 x i8> [[TMP263]], i8 [[TMP176]], i64 8 +; OPT-NEXT: 
   [[TMP265:%.*]] = insertelement <16 x i8> [[TMP264]], i8 [[TMP178]], i64 9 +; OPT-NEXT:    [[TMP266:%.*]] = insertelement <16 x i8> [[TMP265]], i8 [[TMP180]], i64 10 +; OPT-NEXT:    [[TMP267:%.*]] = insertelement <16 x i8> [[TMP266]], i8 [[TMP182]], i64 11 +; OPT-NEXT:    [[TMP268:%.*]] = insertelement <16 x i8> [[TMP267]], i8 [[TMP184]], i64 12 +; OPT-NEXT:    [[TMP269:%.*]] = insertelement <16 x i8> [[TMP268]], i8 [[TMP186]], i64 13 +; OPT-NEXT:    [[TMP270:%.*]] = insertelement <16 x i8> [[TMP269]], i8 [[TMP188]], i64 14 +; OPT-NEXT:    [[TMP271:%.*]] = shufflevector <16 x i8> [[TMP270]], <16 x i8> [[IN]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 31> +; OPT-NEXT:    [[SUM:%.*]] = add <16 x i8> [[TMP271]], [[ADD]] +; OPT-NEXT:    store <16 x i8> [[SUM]], ptr addrspace(3) [[OUT]], align 16 +; OPT-NEXT:    ret void +; +entry: +  %alloca = freeze <128 x i8> poison +  %0 = extractelement <16 x i8> %in, i64 0 +  %1 = insertelement <128 x i8> %alloca, i8 %0, i32 0 +  %2 = extractelement <16 x i8> %in, i64 1 +  %3 = insertelement <128 x i8> %1, i8 %2, i32 1 +  %4 = extractelement <16 x i8> %in, i64 2 +  %5 = insertelement <128 x i8> %3, i8 %4, i32 2 +  %6 = extractelement <16 x i8> %in, i64 3 +  %7 = insertelement <128 x i8> %5, i8 %6, i32 3 +  %8 = extractelement <16 x i8> %in, i64 4 +  %9 = insertelement <128 x i8> %7, i8 %8, i32 4 +  %10 = extractelement <16 x i8> %in, i64 5 +  %11 = insertelement <128 x i8> %9, i8 %10, i32 5 +  %12 = extractelement <16 x i8> %in, i64 6 +  %13 = insertelement <128 x i8> %11, i8 %12, i32 6 +  %14 = extractelement <16 x i8> %in, i64 7 +  %15 = insertelement <128 x i8> %13, i8 %14, i32 7 +  %16 = extractelement <16 x i8> %in, i64 8 +  %17 = insertelement <128 x i8> %15, i8 %16, i32 8 +  %18 = extractelement <16 x i8> %in, i64 9 +  %19 = insertelement <128 x i8> %17, i8 %18, i32 9 +  %20 = extractelement <16 x i8> %in, i64 10 +  %21 = insertelement <128 x i8> %19, i8 %20, i32 10 +  %22 = extractelement <16 x i8> %in, i64 11 +  %23 = insertelement <128 x i8> %21, i8 %22, i32 11 +  %24 = extractelement <16 x i8> %in, i64 12 +  %25 = insertelement <128 x i8> %23, i8 %24, i32 12 +  %26 = extractelement <16 x i8> %in, i64 13 +  %27 = insertelement <128 x i8> %25, i8 %26, i32 13 +  %28 = extractelement <16 x i8> %in, i64 14 +  %29 = insertelement <128 x i8> %27, i8 %28, i32 14 +  %30 = extractelement <16 x i8> %in, i64 15 +  %31 = insertelement <128 x i8> %29, i8 %30, i32 15 +  %32 = extractelement <16 x i8> %in, i64 0 +  %33 = insertelement <128 x i8> %31, i8 %32, i32 16 +  %34 = extractelement <16 x i8> %in, i64 1 +  %35 = insertelement <128 x i8> %33, i8 %34, i32 17 +  %36 = extractelement <16 x i8> %in, i64 2 +  %37 = insertelement <128 x i8> %35, i8 %36, i32 18 +  %38 = extractelement <16 x i8> %in, i64 3 +  %39 = insertelement <128 x i8> %37, i8 %38, i32 19 +  %40 = extractelement <16 x i8> %in, i64 4 +  %41 = insertelement <128 x i8> %39, i8 %40, i32 20 +  %42 = extractelement <16 x i8> %in, i64 5 +  %43 = insertelement <128 x i8> %41, i8 %42, i32 21 +  %44 = extractelement <16 x i8> %in, i64 6 +  %45 = insertelement <128 x i8> %43, i8 %44, i32 22 +  %46 = extractelement <16 x i8> %in, i64 7 +  %47 = insertelement <128 x i8> %45, i8 %46, i32 23 +  %48 = extractelement <16 x i8> %in, i64 8 +  %49 = insertelement <128 x i8> %47, i8 %48, i32 24 +  %50 = extractelement <16 x i8> %in, i64 9 +  %51 = insertelement <128 x i8> %49, i8 %50, i32 25 +  %52 = extractelement <16 x i8> 
%in, i64 10 +  %53 = insertelement <128 x i8> %51, i8 %52, i32 26 +  %54 = extractelement <16 x i8> %in, i64 11 +  %55 = insertelement <128 x i8> %53, i8 %54, i32 27 +  %56 = extractelement <16 x i8> %in, i64 12 +  %57 = insertelement <128 x i8> %55, i8 %56, i32 28 +  %58 = extractelement <16 x i8> %in, i64 13 +  %59 = insertelement <128 x i8> %57, i8 %58, i32 29 +  %60 = extractelement <16 x i8> %in, i64 14 +  %61 = insertelement <128 x i8> %59, i8 %60, i32 30 +  %62 = extractelement <16 x i8> %in, i64 15 +  %63 = insertelement <128 x i8> %61, i8 %62, i32 31 +  %64 = extractelement <16 x i8> %in, i64 0 +  %65 = insertelement <128 x i8> %63, i8 %64, i32 32 +  %66 = extractelement <16 x i8> %in, i64 1 +  %67 = insertelement <128 x i8> %65, i8 %66, i32 33 +  %68 = extractelement <16 x i8> %in, i64 2 +  %69 = insertelement <128 x i8> %67, i8 %68, i32 34 +  %70 = extractelement <16 x i8> %in, i64 3 +  %71 = insertelement <128 x i8> %69, i8 %70, i32 35 +  %72 = extractelement <16 x i8> %in, i64 4 +  %73 = insertelement <128 x i8> %71, i8 %72, i32 36 +  %74 = extractelement <16 x i8> %in, i64 5 +  %75 = insertelement <128 x i8> %73, i8 %74, i32 37 +  %76 = extractelement <16 x i8> %in, i64 6 +  %77 = insertelement <128 x i8> %75, i8 %76, i32 38 +  %78 = extractelement <16 x i8> %in, i64 7 +  %79 = insertelement <128 x i8> %77, i8 %78, i32 39 +  %80 = extractelement <16 x i8> %in, i64 8 +  %81 = insertelement <128 x i8> %79, i8 %80, i32 40 +  %82 = extractelement <16 x i8> %in, i64 9 +  %83 = insertelement <128 x i8> %81, i8 %82, i32 41 +  %84 = extractelement <16 x i8> %in, i64 10 +  %85 = insertelement <128 x i8> %83, i8 %84, i32 42 +  %86 = extractelement <16 x i8> %in, i64 11 +  %87 = insertelement <128 x i8> %85, i8 %86, i32 43 +  %88 = extractelement <16 x i8> %in, i64 12 +  %89 = insertelement <128 x i8> %87, i8 %88, i32 44 +  %90 = extractelement <16 x i8> %in, i64 13 +  %91 = insertelement <128 x i8> %89, i8 %90, i32 45 +  %92 = extractelement <16 x i8> %in, i64 14 +  %93 = insertelement <128 x i8> %91, i8 %92, i32 46 +  %94 = extractelement <16 x i8> %in, i64 15 +  %95 = insertelement <128 x i8> %93, i8 %94, i32 47 +  %96 = extractelement <16 x i8> %in, i64 0 +  %97 = insertelement <128 x i8> %95, i8 %96, i32 48 +  %98 = extractelement <16 x i8> %in, i64 1 +  %99 = insertelement <128 x i8> %97, i8 %98, i32 49 +  %100 = extractelement <16 x i8> %in, i64 2 +  %101 = insertelement <128 x i8> %99, i8 %100, i32 50 +  %102 = extractelement <16 x i8> %in, i64 3 +  %103 = insertelement <128 x i8> %101, i8 %102, i32 51 +  %104 = extractelement <16 x i8> %in, i64 4 +  %105 = insertelement <128 x i8> %103, i8 %104, i32 52 +  %106 = extractelement <16 x i8> %in, i64 5 +  %107 = insertelement <128 x i8> %105, i8 %106, i32 53 +  %108 = extractelement <16 x i8> %in, i64 6 +  %109 = insertelement <128 x i8> %107, i8 %108, i32 54 +  %110 = extractelement <16 x i8> %in, i64 7 +  %111 = insertelement <128 x i8> %109, i8 %110, i32 55 +  %112 = extractelement <16 x i8> %in, i64 8 +  %113 = insertelement <128 x i8> %111, i8 %112, i32 56 +  %114 = extractelement <16 x i8> %in, i64 9 +  %115 = insertelement <128 x i8> %113, i8 %114, i32 57 +  %116 = extractelement <16 x i8> %in, i64 10 +  %117 = insertelement <128 x i8> %115, i8 %116, i32 58 +  %118 = extractelement <16 x i8> %in, i64 11 +  %119 = insertelement <128 x i8> %117, i8 %118, i32 59 +  %120 = extractelement <16 x i8> %in, i64 12 +  %121 = insertelement <128 x i8> %119, i8 %120, i32 60 +  %122 = extractelement <16 x i8> %in, i64 13 +  %123 = 
insertelement <128 x i8> %121, i8 %122, i32 61 +  %124 = extractelement <16 x i8> %in, i64 14 +  %125 = insertelement <128 x i8> %123, i8 %124, i32 62 +  %126 = extractelement <16 x i8> %in, i64 15 +  %127 = insertelement <128 x i8> %125, i8 %126, i32 63 +  %128 = extractelement <16 x i8> %in, i64 0 +  %129 = insertelement <128 x i8> %127, i8 %128, i32 64 +  %130 = extractelement <16 x i8> %in, i64 1 +  %131 = insertelement <128 x i8> %129, i8 %130, i32 65 +  %132 = extractelement <16 x i8> %in, i64 2 +  %133 = insertelement <128 x i8> %131, i8 %132, i32 66 +  %134 = extractelement <16 x i8> %in, i64 3 +  %135 = insertelement <128 x i8> %133, i8 %134, i32 67 +  %136 = extractelement <16 x i8> %in, i64 4 +  %137 = insertelement <128 x i8> %135, i8 %136, i32 68 +  %138 = extractelement <16 x i8> %in, i64 5 +  %139 = insertelement <128 x i8> %137, i8 %138, i32 69 +  %140 = extractelement <16 x i8> %in, i64 6 +  %141 = insertelement <128 x i8> %139, i8 %140, i32 70 +  %142 = extractelement <16 x i8> %in, i64 7 +  %143 = insertelement <128 x i8> %141, i8 %142, i32 71 +  %144 = extractelement <16 x i8> %in, i64 8 +  %145 = insertelement <128 x i8> %143, i8 %144, i32 72 +  %146 = extractelement <16 x i8> %in, i64 9 +  %147 = insertelement <128 x i8> %145, i8 %146, i32 73 +  %148 = extractelement <16 x i8> %in, i64 10 +  %149 = insertelement <128 x i8> %147, i8 %148, i32 74 +  %150 = extractelement <16 x i8> %in, i64 11 +  %151 = insertelement <128 x i8> %149, i8 %150, i32 75 +  %152 = extractelement <16 x i8> %in, i64 12 +  %153 = insertelement <128 x i8> %151, i8 %152, i32 76 +  %154 = extractelement <16 x i8> %in, i64 13 +  %155 = insertelement <128 x i8> %153, i8 %154, i32 77 +  %156 = extractelement <16 x i8> %in, i64 14 +  %157 = insertelement <128 x i8> %155, i8 %156, i32 78 +  %158 = extractelement <16 x i8> %in, i64 15 +  %159 = insertelement <128 x i8> %157, i8 %158, i32 79 +  %160 = extractelement <16 x i8> %in, i64 0 +  %161 = insertelement <128 x i8> %159, i8 %160, i32 80 +  %162 = extractelement <16 x i8> %in, i64 1 +  %163 = insertelement <128 x i8> %161, i8 %162, i32 81 +  %164 = extractelement <16 x i8> %in, i64 2 +  %165 = insertelement <128 x i8> %163, i8 %164, i32 82 +  %166 = extractelement <16 x i8> %in, i64 3 +  %167 = insertelement <128 x i8> %165, i8 %166, i32 83 +  %168 = extractelement <16 x i8> %in, i64 4 +  %169 = insertelement <128 x i8> %167, i8 %168, i32 84 +  %170 = extractelement <16 x i8> %in, i64 5 +  %171 = insertelement <128 x i8> %169, i8 %170, i32 85 +  %172 = extractelement <16 x i8> %in, i64 6 +  %173 = insertelement <128 x i8> %171, i8 %172, i32 86 +  %174 = extractelement <16 x i8> %in, i64 7 +  %175 = insertelement <128 x i8> %173, i8 %174, i32 87 +  %176 = extractelement <16 x i8> %in, i64 8 +  %177 = insertelement <128 x i8> %175, i8 %176, i32 88 +  %178 = extractelement <16 x i8> %in, i64 9 +  %179 = insertelement <128 x i8> %177, i8 %178, i32 89 +  %180 = extractelement <16 x i8> %in, i64 10 +  %181 = insertelement <128 x i8> %179, i8 %180, i32 90 +  %182 = extractelement <16 x i8> %in, i64 11 +  %183 = insertelement <128 x i8> %181, i8 %182, i32 91 +  %184 = extractelement <16 x i8> %in, i64 12 +  %185 = insertelement <128 x i8> %183, i8 %184, i32 92 +  %186 = extractelement <16 x i8> %in, i64 13 +  %187 = insertelement <128 x i8> %185, i8 %186, i32 93 +  %188 = extractelement <16 x i8> %in, i64 14 +  %189 = insertelement <128 x i8> %187, i8 %188, i32 94 +  %190 = extractelement <16 x i8> %in, i64 15 +  %191 = insertelement <128 x i8> %189, i8 
%190, i32 95 +  %192 = extractelement <16 x i8> %in, i64 0 +  %193 = insertelement <128 x i8> %191, i8 %192, i32 96 +  %194 = extractelement <16 x i8> %in, i64 1 +  %195 = insertelement <128 x i8> %193, i8 %194, i32 97 +  %196 = extractelement <16 x i8> %in, i64 2 +  %197 = insertelement <128 x i8> %195, i8 %196, i32 98 +  %198 = extractelement <16 x i8> %in, i64 3 +  %199 = insertelement <128 x i8> %197, i8 %198, i32 99 +  %200 = extractelement <16 x i8> %in, i64 4 +  %201 = insertelement <128 x i8> %199, i8 %200, i32 100 +  %202 = extractelement <16 x i8> %in, i64 5 +  %203 = insertelement <128 x i8> %201, i8 %202, i32 101 +  %204 = extractelement <16 x i8> %in, i64 6 +  %205 = insertelement <128 x i8> %203, i8 %204, i32 102 +  %206 = extractelement <16 x i8> %in, i64 7 +  %207 = insertelement <128 x i8> %205, i8 %206, i32 103 +  %208 = extractelement <16 x i8> %in, i64 8 +  %209 = insertelement <128 x i8> %207, i8 %208, i32 104 +  %210 = extractelement <16 x i8> %in, i64 9 +  %211 = insertelement <128 x i8> %209, i8 %210, i32 105 +  %212 = extractelement <16 x i8> %in, i64 10 +  %213 = insertelement <128 x i8> %211, i8 %212, i32 106 +  %214 = extractelement <16 x i8> %in, i64 11 +  %215 = insertelement <128 x i8> %213, i8 %214, i32 107 +  %216 = extractelement <16 x i8> %in, i64 12 +  %217 = insertelement <128 x i8> %215, i8 %216, i32 108 +  %218 = extractelement <16 x i8> %in, i64 13 +  %219 = insertelement <128 x i8> %217, i8 %218, i32 109 +  %220 = extractelement <16 x i8> %in, i64 14 +  %221 = insertelement <128 x i8> %219, i8 %220, i32 110 +  %222 = extractelement <16 x i8> %in, i64 15 +  %223 = insertelement <128 x i8> %221, i8 %222, i32 111 +  %224 = extractelement <16 x i8> %in, i64 0 +  %225 = insertelement <128 x i8> %223, i8 %224, i32 112 +  %226 = extractelement <16 x i8> %in, i64 1 +  %227 = insertelement <128 x i8> %225, i8 %226, i32 113 +  %228 = extractelement <16 x i8> %in, i64 2 +  %229 = insertelement <128 x i8> %227, i8 %228, i32 114 +  %230 = extractelement <16 x i8> %in, i64 3 +  %231 = insertelement <128 x i8> %229, i8 %230, i32 115 +  %232 = extractelement <16 x i8> %in, i64 4 +  %233 = insertelement <128 x i8> %231, i8 %232, i32 116 +  %234 = extractelement <16 x i8> %in, i64 5 +  %235 = insertelement <128 x i8> %233, i8 %234, i32 117 +  %236 = extractelement <16 x i8> %in, i64 6 +  %237 = insertelement <128 x i8> %235, i8 %236, i32 118 +  %238 = extractelement <16 x i8> %in, i64 7 +  %239 = insertelement <128 x i8> %237, i8 %238, i32 119 +  %240 = extractelement <16 x i8> %in, i64 8 +  %241 = insertelement <128 x i8> %239, i8 %240, i32 120 +  %242 = extractelement <16 x i8> %in, i64 9 +  %243 = insertelement <128 x i8> %241, i8 %242, i32 121 +  %244 = extractelement <16 x i8> %in, i64 10 +  %245 = insertelement <128 x i8> %243, i8 %244, i32 122 +  %246 = extractelement <16 x i8> %in, i64 11 +  %247 = insertelement <128 x i8> %245, i8 %246, i32 123 +  %248 = extractelement <16 x i8> %in, i64 12 +  %249 = insertelement <128 x i8> %247, i8 %248, i32 124 +  %250 = extractelement <16 x i8> %in, i64 13 +  %251 = insertelement <128 x i8> %249, i8 %250, i32 125 +  %252 = extractelement <16 x i8> %in, i64 14 +  %253 = insertelement <128 x i8> %251, i8 %252, i32 126 +  %254 = extractelement <16 x i8> %in, i64 15 +  %255 = insertelement <128 x i8> %253, i8 %254, i32 127 +  %256 = insertelement <16 x i8> poison, i8 %160, i64 0 +  %257 = insertelement <16 x i8> %256, i8 %162, i64 1 +  %258 = insertelement <16 x i8> %257, i8 %164, i64 2 +  %259 = insertelement <16 x i8> 
%258, i8 %166, i64 3 +  %260 = insertelement <16 x i8> %259, i8 %168, i64 4 +  %261 = insertelement <16 x i8> %260, i8 %170, i64 5 +  %262 = insertelement <16 x i8> %261, i8 %172, i64 6 +  %263 = insertelement <16 x i8> %262, i8 %174, i64 7 +  %264 = insertelement <16 x i8> %263, i8 %176, i64 8 +  %265 = insertelement <16 x i8> %264, i8 %178, i64 9 +  %266 = insertelement <16 x i8> %265, i8 %180, i64 10 +  %267 = insertelement <16 x i8> %266, i8 %182, i64 11 +  %268 = insertelement <16 x i8> %267, i8 %184, i64 12 +  %269 = insertelement <16 x i8> %268, i8 %186, i64 13 +  %270 = insertelement <16 x i8> %269, i8 %188, i64 14 +  %271 = insertelement <16 x i8> %270, i8 %190, i64 15 +  %sum = add <16 x i8> %271, %add +  store <16 x i8> %sum, ptr addrspace(3) %out, align 16 +  ret void +} + +attributes #0 = { "amdgpu-waves-per-eu"="2,2" } diff --git a/llvm/test/lit.cfg.py b/llvm/test/lit.cfg.py index 974af4b..cadf781 100644 --- a/llvm/test/lit.cfg.py +++ b/llvm/test/lit.cfg.py @@ -57,8 +57,13 @@ if config.enable_profcheck:      # so we just exclude llvm-reduce tests from this config altogether. This should      # be fine though as profcheck config tests are mostly concerned with opt.      config.excludes.append("llvm-reduce") +    # Exclude llvm-objcopy tests - not the target of this effort, and some use +    # cat in ways that conflict with how profcheck uses it. +    config.excludes.append("llvm-objcopy")      # (Issue #161235) Temporarily exclude LoopVectorize.      config.excludes.append("LoopVectorize") +    # exclude UpdateTestChecks - they fail because of inserted prof annotations +    config.excludes.append("UpdateTestChecks")  # test_source_root: The root path where tests are located.  config.test_source_root = os.path.dirname(__file__) diff --git a/llvm/test/tools/dsymutil/ARM/swiftmodule-include-from-interface.test b/llvm/test/tools/dsymutil/ARM/swiftmodule-include-from-interface.test new file mode 100644 index 0000000..00141f12 --- /dev/null +++ b/llvm/test/tools/dsymutil/ARM/swiftmodule-include-from-interface.test @@ -0,0 +1,33 @@ +# RUN: dsymutil -include-swiftmodules-from-interface -verbose -oso-prepend-path=%p -y -o %t.dSYM  %s | FileCheck %s +# +# RUN: dsymutil -include-swiftmodules-from-interface --linker parallel -verbose -oso-prepend-path=%p -y %s -o %t-parallel.dSYM | FileCheck %s +# +# To regenerate: +# echo ''>I.swift +# echo ''>B.swift +# echo 'import I'>main.swift +# xcrun swiftc -emit-module-interface-path I.swiftinterface -enable-library-evolution I.swift +# xcrun swiftc -emit-module-path B.swiftmodule B.swift -Xfrontend -no-serialize-debugging-options +# xcrun swiftc -explicit-module-build main.swift -I. -module-cache-path cache -g -Xfrontend  -no-serialize-debugging-options +# output is "B.swiftmodule" and "cache/I*.swiftmodule" +# +# CHECK-NOT: Skipping compiled textual Swift interface: {{.*}}/Inputs/Binary.swiftmodule +# CHECK-NOT: Skipping compiled textual Swift interface: {{.*}}/Inputs/FromInterface.swiftmodule + +# +--- +triple:          'arm64-apple-darwin' +objects: +  - filename:        '../Inputs/Binary.swiftmodule' +    timestamp:       0 +    type:            50 +    symbols:         [] +  - filename:        '../Inputs/FromInterface.swiftmodule' +    timestamp:       0 +    type:            50 +    symbols:         [] +  - filename:        '../Inputs/FromInterface.swiftmodule' +    timestamp:       0 +    type:            50 +    symbols:         [] +... 
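As a usage note for the option exercised by the new test above: `-include-swiftmodules-from-interface` is a plain flag (it takes no value), and enabling it makes dsymutil copy binary swiftmodules built from textual `.swiftinterface` files into the dSYM bundle instead of skipping them. A minimal sketch of an invocation follows; the binary name and object path are placeholders and are not taken from the patch:

```
# Hypothetical example: MyApp and ./obj are placeholder names.
# Without the flag, dsymutil skips swiftmodules it positively identifies as
# compiled from textual interfaces; with it, they are copied into the bundle.
dsymutil -include-swiftmodules-from-interface -verbose \
    -oso-prepend-path=./obj -o MyApp.dSYM MyApp
```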
diff --git a/llvm/test/tools/dsymutil/cmdline.test b/llvm/test/tools/dsymutil/cmdline.test index 1574fe3..0b0bce1 100644 --- a/llvm/test/tools/dsymutil/cmdline.test +++ b/llvm/test/tools/dsymutil/cmdline.test @@ -14,6 +14,7 @@ CHECK: -fat64  CHECK: -flat  CHECK: -gen-reproducer  CHECK: -help +CHECK: -include-swiftmodules-from-interface  CHECK: -keep-function-for-static  CHECK: -no-object-timestamp  CHECK: -no-odr diff --git a/llvm/tools/dsymutil/DwarfLinkerForBinary.cpp b/llvm/tools/dsymutil/DwarfLinkerForBinary.cpp index b91c27e..ee1e906 100644 --- a/llvm/tools/dsymutil/DwarfLinkerForBinary.cpp +++ b/llvm/tools/dsymutil/DwarfLinkerForBinary.cpp @@ -794,9 +794,10 @@ bool DwarfLinkerForBinary::linkImpl(          reportWarning("Could not parse binary Swift module: " +                            toString(FromInterfaceOrErr.takeError()),                        Obj->getObjectFilename()); -        // Only skip swiftmodules that could be parsed and are -        // positively identified as textual. -      } else if (*FromInterfaceOrErr) { +        // Only skip swiftmodules that could be parsed and are positively +        // identified as textual. Do so only when the option allows. +      } else if (*FromInterfaceOrErr && +                 !Options.IncludeSwiftModulesFromInterface) {          if (Options.Verbose)            outs() << "Skipping compiled textual Swift interface: "                   << Obj->getObjectFilename() << "\n"; diff --git a/llvm/tools/dsymutil/LinkUtils.h b/llvm/tools/dsymutil/LinkUtils.h index ad5515a..c333a3d 100644 --- a/llvm/tools/dsymutil/LinkUtils.h +++ b/llvm/tools/dsymutil/LinkUtils.h @@ -114,6 +114,13 @@ struct LinkOptions {    /// Whether all remarks should be kept or only remarks with valid debug    /// locations.    bool RemarksKeepAll = true; + +  /// Whether or not to copy binary swiftmodules built from textual +  /// .swiftinterface files into the dSYM bundle. These typically come only +  /// from the SDK (since textual interfaces require library evolution) and +  /// thus are a waste of space to copy into the bundle. Turn this on if the +  /// swiftmodules are different from those in the SDK. +  bool IncludeSwiftModulesFromInterface = false;    /// @}    LinkOptions() = default; diff --git a/llvm/tools/dsymutil/Options.td b/llvm/tools/dsymutil/Options.td index ad35e55..e99bc12 100644 --- a/llvm/tools/dsymutil/Options.td +++ b/llvm/tools/dsymutil/Options.td @@ -202,6 +202,14 @@ def remarks_drop_without_debug: Flag<["--", "-"], "remarks-drop-without-debug">,             "all remarks are kept.">,    Group<grp_general>; +def include_swiftmodules_from_interface: Flag<["--", "-"], "include-swiftmodules-from-interface">, +  HelpText<"Whether or not to copy binary swiftmodules built from textual " +  ".swiftinterface files into the dSYM bundle. These typically come only " +  "from the SDK (since textual interfaces require library evolution) and " +  "thus are a waste of space to copy into the bundle. Turn this on if the " +  "swiftmodules are different from those in the SDK.">, +  Group<grp_general>; +  def linker: Separate<["--", "-"], "linker">,    MetaVarName<"<DWARF linker type>">,    HelpText<"Specify the desired type of DWARF linker. 
Defaults to 'classic'">, diff --git a/llvm/tools/dsymutil/dsymutil.cpp b/llvm/tools/dsymutil/dsymutil.cpp index 913077e..688f6aa 100644 --- a/llvm/tools/dsymutil/dsymutil.cpp +++ b/llvm/tools/dsymutil/dsymutil.cpp @@ -391,6 +391,9 @@ static Expected<DsymutilOptions> getOptions(opt::InputArgList &Args) {    Options.LinkOpts.RemarksKeepAll =        !Args.hasArg(OPT_remarks_drop_without_debug); +  Options.LinkOpts.IncludeSwiftModulesFromInterface = +      Args.hasArg(OPT_include_swiftmodules_from_interface); +    if (opt::Arg *BuildVariantSuffix = Args.getLastArg(OPT_build_variant_suffix))      Options.LinkOpts.BuildVariantSuffix = BuildVariantSuffix->getValue(); diff --git a/llvm/tools/llvm-jitlink/llvm-jitlink.cpp b/llvm/tools/llvm-jitlink/llvm-jitlink.cpp index 79216e8..88d6daf 100644 --- a/llvm/tools/llvm-jitlink/llvm-jitlink.cpp +++ b/llvm/tools/llvm-jitlink/llvm-jitlink.cpp @@ -776,6 +776,7 @@ createSharedMemoryManager(SimpleRemoteEPC &SREPC) {        SlabSize, SREPC, SAs);  } +#if LLVM_ON_UNIX && LLVM_ENABLE_THREADS  static void setupEPCRemoteMemoryManager(SimpleRemoteEPC::Setup &S) {    switch (UseMemMgr) {    case MemMgr::Default: @@ -789,6 +790,7 @@ static void setupEPCRemoteMemoryManager(SimpleRemoteEPC::Setup &S) {      break;    }  } +#endif  static Expected<MaterializationUnit::Interface>  getTestObjectFileInterface(Session &S, MemoryBufferRef O) { diff --git a/llvm/unittests/IR/ConstantsTest.cpp b/llvm/unittests/IR/ConstantsTest.cpp index 6376165..9cb9e12 100644 --- a/llvm/unittests/IR/ConstantsTest.cpp +++ b/llvm/unittests/IR/ConstantsTest.cpp @@ -29,13 +29,8 @@ TEST(ConstantsTest, UseCounts) {    EXPECT_TRUE(Zero->use_empty());    EXPECT_EQ(Zero->getNumUses(), 0u); -  EXPECT_TRUE(Zero->hasNUses(0));    EXPECT_FALSE(Zero->hasOneUse());    EXPECT_FALSE(Zero->hasOneUser()); -  EXPECT_FALSE(Zero->hasNUses(1)); -  EXPECT_FALSE(Zero->hasNUsesOrMore(1)); -  EXPECT_FALSE(Zero->hasNUses(2)); -  EXPECT_FALSE(Zero->hasNUsesOrMore(2));    std::unique_ptr<Module> M(new Module("MyModule", Context)); @@ -50,15 +45,36 @@ TEST(ConstantsTest, UseCounts) {    // Still looks like use_empty with uses.    
EXPECT_TRUE(Zero->use_empty());    EXPECT_EQ(Zero->getNumUses(), 0u); -  EXPECT_TRUE(Zero->hasNUses(0));    EXPECT_FALSE(Zero->hasOneUse());    EXPECT_FALSE(Zero->hasOneUser()); -  EXPECT_FALSE(Zero->hasNUses(1)); -  EXPECT_FALSE(Zero->hasNUsesOrMore(1)); -  EXPECT_FALSE(Zero->hasNUses(2)); -  EXPECT_FALSE(Zero->hasNUsesOrMore(2));  } +#ifdef GTEST_HAS_DEATH_TEST +#ifndef NDEBUG + +TEST(ConstantsTest, hasNUsesInvalid) { +  LLVMContext Context; +  Type *Int32Ty = Type::getInt32Ty(Context); +  Constant *Zero = ConstantInt::get(Int32Ty, 0); +  std::unique_ptr<Module> M(new Module("MyModule", Context)); + +  // Introduce some uses +  new GlobalVariable(*M, Int32Ty, /*isConstant=*/false, +                     GlobalValue::ExternalLinkage, /*Initializer=*/Zero, +                     "gv_user0"); +  new GlobalVariable(*M, Int32Ty, /*isConstant=*/false, +                     GlobalValue::ExternalLinkage, /*Initializer=*/Zero, +                     "gv_user1"); + +  for (int I = 0; I != 3; ++I) { +    EXPECT_DEATH(Zero->hasNUses(I), "hasUseList\\(\\)"); +    EXPECT_DEATH(Zero->hasNUsesOrMore(I), "hasUseList\\(\\)"); +  } +} + +#endif +#endif +  TEST(ConstantsTest, Integer_i1) {    LLVMContext Context;    IntegerType *Int1 = IntegerType::get(Context, 1); diff --git a/llvm/unittests/Transforms/Utils/LocalTest.cpp b/llvm/unittests/Transforms/Utils/LocalTest.cpp index 4b53cc3..4908eda 100644 --- a/llvm/unittests/Transforms/Utils/LocalTest.cpp +++ b/llvm/unittests/Transforms/Utils/LocalTest.cpp @@ -1153,7 +1153,7 @@ TEST(Local, ExpressionForConstant) {    IntegerType *Int1Ty = Type::getInt1Ty(Context);    Expr = createExpression(ConstantInt::getTrue(Context), Int1Ty);    EXPECT_NE(Expr, nullptr); -  EXPECT_EQ(Expr->getElement(1), 18446744073709551615U); +  EXPECT_EQ(Expr->getElement(1), 1U);    Expr = createExpression(ConstantInt::getFalse(Context), Int1Ty);    EXPECT_NE(Expr, nullptr); diff --git a/llvm/utils/UpdateTestChecks/asm.py b/llvm/utils/UpdateTestChecks/asm.py index 469e27f..61f0d67 100644 --- a/llvm/utils/UpdateTestChecks/asm.py +++ b/llvm/utils/UpdateTestChecks/asm.py @@ -576,6 +576,7 @@ def get_run_handler(triple):          "armv7-apple-ios": (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_IOS_RE),          "armv7-apple-darwin": (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_DARWIN_RE),          "armv7k-apple-watchos": (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_DARWIN_RE), +        "thumbv7k-apple-watchos": (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_DARWIN_RE),          "thumb": (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_RE),          "thumb-macho": (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_MACHO_RE),          "thumbv5-macho": (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_MACHO_RE), diff --git a/llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn b/llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn index a42f781..b6c2f46 100644 --- a/llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn +++ b/llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn @@ -21,6 +21,7 @@ static_library("lib") {    sources = [      "Breakpoint.cpp",      "BreakpointBase.cpp", +    "ClientLauncher.cpp",      "CommandPlugins.cpp",      "DAP.cpp",      "DAPError.cpp", diff --git a/llvm/utils/profcheck-xfail.txt b/llvm/utils/profcheck-xfail.txt index 661c881..d7af3a7 100644 --- a/llvm/utils/profcheck-xfail.txt +++ b/llvm/utils/profcheck-xfail.txt @@ -530,32 +530,6 @@ Instrumentation/TypeSanitizer/swifterror.ll  LTO/X86/diagnostic-handler-remarks-with-hotness.ll  Other/optimization-remarks-auto.ll  Other/X86/debugcounter-partiallyinlinelibcalls.ll 
-tools/llvm-objcopy/ELF/auto-remove-add-symtab-shndx.test -tools/UpdateTestChecks/update_analyze_test_checks/loop-access-analysis.test -tools/UpdateTestChecks/update_analyze_test_checks/loop-distribute.test -tools/UpdateTestChecks/update_test_checks/argument_name_reuse.test -tools/UpdateTestChecks/update_test_checks/basic.test -tools/UpdateTestChecks/update_test_checks/check_attrs.test -tools/UpdateTestChecks/update_test_checks/difile_absolute_filenames.test -tools/UpdateTestChecks/update_test_checks/filter_out_after.test -tools/UpdateTestChecks/update_test_checks/generated_funcs_prefix_reuse.test -tools/UpdateTestChecks/update_test_checks/generated_funcs.test -tools/UpdateTestChecks/update_test_checks/global_preserve_name.test -tools/UpdateTestChecks/update_test_checks/if_target.test -tools/UpdateTestChecks/update_test_checks/named_function_arguments_split.test -tools/UpdateTestChecks/update_test_checks/on_the_fly_arg_change.test -tools/UpdateTestChecks/update_test_checks/phi-labels.test -tools/UpdateTestChecks/update_test_checks/pre-process.test -tools/UpdateTestChecks/update_test_checks/stable_ir_values2.test -tools/UpdateTestChecks/update_test_checks/stable_ir_values3.test -tools/UpdateTestChecks/update_test_checks/stable_ir_values4.test -tools/UpdateTestChecks/update_test_checks/stable_ir_values5.test -tools/UpdateTestChecks/update_test_checks/stable_ir_values6.test -tools/UpdateTestChecks/update_test_checks/stable_ir_values_funcs.test -tools/UpdateTestChecks/update_test_checks/stable_ir_values.test -tools/UpdateTestChecks/update_test_checks/switch_case.test -tools/UpdateTestChecks/update_test_checks/tbaa-semantics-checks.test -tools/UpdateTestChecks/update_test_checks/various_ir_values_dbgrecords.test  Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll  Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll  Transforms/AtomicExpand/AArch64/pcsections.ll @@ -1323,6 +1297,7 @@ Transforms/SimpleLoopUnswitch/trivial-unswitch.ll  Transforms/SimpleLoopUnswitch/trivial-unswitch-logical-and-or.ll  Transforms/StackProtector/cross-dso-cfi-stack-chk-fail.ll  Transforms/StructurizeCFG/AMDGPU/uniform-regions.ll +Transforms/StructurizeCFG/callbr.ll  Transforms/StructurizeCFG/hoist-zerocost.ll  Transforms/StructurizeCFG/loop-break-phi.ll  Transforms/StructurizeCFG/nested-loop-order.ll diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td index ba5e48e..46fdf54 100644 --- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td +++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td @@ -1589,10 +1589,11 @@ def FPRoundingModeRM   : I32EnumAttrCase<"RM",   2, "rm">;  def FPRoundingModeRP   : I32EnumAttrCase<"RP",   3, "rp">;  def FPRoundingModeRZ   : I32EnumAttrCase<"RZ",   4, "rz">;  def FPRoundingModeRNA  : I32EnumAttrCase<"RNA",  5, "rna">; +def FPRoundingModeRS   : I32EnumAttrCase<"RS",   6, "rs">;  def FPRoundingMode : I32EnumAttr<"FPRoundingMode", "NVVM FPRoundingMode kind",    [FPRoundingModeNone, FPRoundingModeRN, FPRoundingModeRM, -    FPRoundingModeRP, FPRoundingModeRZ, FPRoundingModeRNA]> { +    FPRoundingModeRP, FPRoundingModeRZ, FPRoundingModeRNA, FPRoundingModeRS]> {    let genSpecializedAttr = 0;    let cppNamespace = "::mlir::NVVM";  } @@ -1907,6 +1908,96 @@ def NVVM_ConvertF4x2ToF16x2Op :    NVVM_ConvertToFP16x2Op_Base<"F4", I8, "F16">;  //===----------------------------------------------------------------------===// +// NVVM Stochastic Rounding Conversion Ops +//===----------------------------------------------------------------------===// + +// Base 
class for conversions from F32x2 to FPx2 formats +// (F16x2, BF16x2) +// TODO: In separate PR, add .rn and .rz rounding variants for this conversion +// as currently only support .rs rounding mode +class NVVM_ConvertF32x2ToFPx2OpBase<string dstFormat, string mnemonic, Type dstType> : +  NVVM_Op<mnemonic, [Pure, NVVMRequiresSMa<[100, 103]>]>, +  Results<(outs dstType:$dst)>, +  Arguments<(ins F32:$src_hi, F32:$src_lo, I32:$rbits, +                 DefaultValuedAttr<FPRoundingModeAttr, "FPRoundingMode::RS">:$rnd, +                 DefaultValuedAttr<SaturationModeAttr, "SaturationMode::NONE">:$sat, +                 DefaultValuedAttr<BoolAttr, "false">:$relu)> { +  let summary = "Convert two F32 values to packed " # dstFormat # " with stochastic rounding (.rs)"; +  let description = [{ +    Converts two F32 values to packed }] # dstFormat # [{ format using stochastic  +    rounding (.rs) mode with randomness provided by the `rbits` parameter. The  +    `relu` attribute clamps negative results to 0. The `sat` attribute determines  +    saturation behavior. The `src_hi` and `src_lo` parameters correspond to operands  +    `a` and `b` in the PTX ISA, respectively. +     +    [For more information, see PTX ISA](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cvt) +  }]; +   +  let assemblyFormat = "$src_hi `,` $src_lo `,` $rbits attr-dict `:` type($dst)"; + +  let hasVerifier = 1; +   +  let extraClassDeclaration = [{ +    llvm::Intrinsic::ID getIntrinsicID(); +  }]; +   +  string llvmBuilder = [{ +    auto intId = op.getIntrinsicID(); +    $dst = createIntrinsicCall(builder, intId, {$src_hi, $src_lo, $rbits}); +  }]; +  } + +// F32x2 -> F16x2 with stochastic rounding +def NVVM_ConvertF32x2ToF16x2Op : NVVM_ConvertF32x2ToFPx2OpBase<"f16x2", "convert.f32x2.to.f16x2", VectorOfLengthAndType<[2], [F16]>>; + +// F32x2 -> BF16x2 with stochastic rounding +def NVVM_ConvertF32x2ToBF16x2Op : NVVM_ConvertF32x2ToFPx2OpBase<"bf16x2", "convert.f32x2.to.bf16x2", VectorOfLengthAndType<[2], [BF16]>>; + +// Base class for stochastic rounding conversions from F32x4 to FPx4 formats +// (E4M3x4, E5M2x4, E2M3x4, E3M2x4, E2M1x4) +// These operations always use RS (stochastic rounding) mode with SATFINITE saturation. +class NVVM_ConvertF32x4ToFPx4OpBase<string dstFormat, string mnemonic, Type dstType> : +  NVVM_Op<mnemonic, [Pure, NVVMRequiresSMa<[100, 103]>]>, +  Results<(outs dstType:$dst)>, +  Arguments<(ins VectorOfLengthAndType<[4], [F32]>:$src, I32:$rbits, +                 DefaultValuedAttr<BoolAttr, "false">:$relu, +                 TypeAttr:$dstTy)> { +  let summary = "Convert vector<4xf32> to packed " # dstFormat # " with stochastic rounding (.rs) and satfinite"; +  let description = [{ +    Converts a vector<4xf32> to packed }] # dstFormat # [{ format using  +    stochastic rounding (.rs) mode with SATFINITE saturation. Randomness is  +    provided by the `rbits` parameter. The `dstTy` attribute specifies the  +    target floating-point format. The `relu` attribute clamps negative results to 0. +     +    Note: These operations always use RS rounding mode and SATFINITE saturation mode. 
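As a rough illustration of how a lowering could emit the two-element RS conversions defined above, here is a hedged C++ sketch. It assumes the usual ODS-generated `create` overload in which the trailing `rnd`/`sat`/`relu` attributes keep their declared defaults (RS, NONE, false); if that overload differs, the attributes would be passed explicitly. The helper name is invented.
```
#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"

using namespace mlir;

// Build vector<2xf16> from two f32 operands with stochastic rounding, using
// the entropy operand `rbits`, as described by the op definition above.
static Value emitF32x2ToF16x2RS(OpBuilder &b, Location loc, Value hi, Value lo,
                                Value rbits) {
  auto resTy = VectorType::get({2}, b.getF16Type());
  auto op = NVVM::ConvertF32x2ToF16x2Op::create(b, loc, resTy, hi, lo, rbits);
  return op.getResult();
}
```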
+     +    [For more information, see PTX ISA](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-cvt) +  }]; +   +  let assemblyFormat = "$src `,` $rbits attr-dict `:` type($src) `->` type($dst) `(` $dstTy `)`"; + +  let hasVerifier = 1; + +  let extraClassDeclaration = [{ +    llvm::Intrinsic::ID getIntrinsicID(); +  }]; +   +  string llvmBuilder = [{ +    auto intId = op.getIntrinsicID(); +    $dst = createIntrinsicCall(builder, intId, {$src, $rbits}); +  }]; +} + +// F32x4 -> F8x4 with stochastic rounding (supports E4M3FN, E5M2) +def NVVM_ConvertF32x4ToF8x4Op : NVVM_ConvertF32x4ToFPx4OpBase<"f8x4", "convert.f32x4.to.f8x4", VectorOfLengthAndType<[4], [I8]>>; + +// F32x4 -> F6x4 with stochastic rounding (supports E2M3FN, E3M2FN) +def NVVM_ConvertF32x4ToF6x4Op : NVVM_ConvertF32x4ToFPx4OpBase<"f6x4", "convert.f32x4.to.f6x4", VectorOfLengthAndType<[4], [I8]>>; + +// F32x4 -> F4x4 with stochastic rounding (supports E2M1FN) +def NVVM_ConvertF32x4ToF4x4Op : NVVM_ConvertF32x4ToFPx4OpBase<"f4x4", "convert.f32x4.to.f4x4", I16>; + +//===----------------------------------------------------------------------===//  // NVVM MMA Ops  //===----------------------------------------------------------------------===//  /// Helpers to instantiate different version of wmma intrinsics. diff --git a/mlir/include/mlir/Dialect/OpenACC/OpenACCOps.td b/mlir/include/mlir/Dialect/OpenACC/OpenACCOps.td index 2f4517d..c689b7e 100644 --- a/mlir/include/mlir/Dialect/OpenACC/OpenACCOps.td +++ b/mlir/include/mlir/Dialect/OpenACC/OpenACCOps.td @@ -2557,6 +2557,12 @@ def OpenACC_LoopOp : OpenACC_Op<"loop",      device-type-aware getter methods. When modifying these operands, the      corresponding `device_type` attributes must be updated to maintain      consistency between operands and their target device types. + +    The `unstructured` attribute indicates that the loops inside the OpenACC +    construct contain early exits and cannot be lowered to structured MLIR +    operations. When this flag is set, the acc.loop should have no induction +    variables and the loop must be implemented via explicit control flow +    inside its body.    
}];    let arguments = (ins @@ -2590,7 +2596,8 @@ def OpenACC_LoopOp : OpenACC_Op<"loop",        OptionalAttr<SymbolRefArrayAttr>:$firstprivatizationRecipes,        Variadic<AnyType>:$reductionOperands,        OptionalAttr<SymbolRefArrayAttr>:$reductionRecipes, -      OptionalAttr<OpenACC_CombinedConstructsAttr>:$combined +      OptionalAttr<OpenACC_CombinedConstructsAttr>:$combined, +      UnitAttr:$unstructured    );    let results = (outs Variadic<AnyType>:$results); diff --git a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPU.h b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPU.h index 1481859..0c05996 100644 --- a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPU.h +++ b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPU.h @@ -30,9 +30,11 @@ class SliceAttr;  } // namespace xegpu  } // namespace mlir +// clang-format off +#include <mlir/Dialect/XeGPU/IR/XeGPUEnums.h.inc>  #include <mlir/Dialect/XeGPU/IR/XeGPUAttrInterface.h.inc>  #include <mlir/Dialect/XeGPU/IR/XeGPUDialect.h.inc> -#include <mlir/Dialect/XeGPU/IR/XeGPUEnums.h.inc> +// clang-format on  #define GET_ATTRDEF_CLASSES  #include <mlir/Dialect/XeGPU/IR/XeGPUAttrs.h.inc> diff --git a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td index 40352b4..9c35c07 100644 --- a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td +++ b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUAttrs.td @@ -223,17 +223,17 @@ def DistributeLayoutAttr: AttrInterface<"DistributeLayoutAttr"> {      InterfaceMethod<"Derive a new layout by dropping InstData",                      "xegpu::DistributeLayoutAttr",                      "dropInstData">, -    InterfaceMethod<[{Delinearizes a linear subgroup ID into its multidimensional -                      indices based on the effective subgroup layout.}], +    InterfaceMethod<[{Delinearizes a linear ID into its multidimensional +                      indices based on the effective layout level.}],                      "FailureOr<SmallVector<Value>>", -                    "delinearizeSubgroupId", +                    "delinearizeId",                      (ins "OpBuilder &": $builder, "Location":$loc, "Value":$linearId)>, -    InterfaceMethod<[{Generates instructions to compute multidimensional offsets for blocks -                      assigned to a subgroup identified by linearId. The shape parameter -                      represents the workgroup-level problem size. Each subgroup may access +    InterfaceMethod<[{Generates instructions to compute multidimensional coordinates for dist units +                      assigned to a level identified by linearId. The shape parameter +                      represents the higher-level problem size. Each level may access                        multiple blocks according to round-robin distribution rules.}],                      "FailureOr<SmallVector<SmallVector<Value>>>", -                    "getOffsets", +                    "computeDistributedCoords",                      (ins "OpBuilder &": $builder, "Location":$loc, "Value":$linearId, "ArrayRef<int64_t>":$shape)>,      InterfaceMethod</*desc=*/[{Check if this layout can be achieved by applying a transpose                       to some other layout according to given permutation of (0...n-1).}], @@ -476,17 +476,17 @@ def XeGPU_LayoutAttr : XeGPUAttr<"Layout", "layout", [DistributeLayoutAttr]> {        return {};      } -    /// Delinearizes a linear subgroup ID into its multidimensional indices -    /// based on the effective subgroup layout. 
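Since both `LayoutAttr` and `SliceAttr` now implement these interface hooks for either the subgroup or the lane level, a hedged usage sketch of the renamed methods may help; only the interface calls come from this change, the surrounding helper is illustrative.
```
#include "mlir/Dialect/XeGPU/IR/XeGPU.h"
#include "mlir/IR/Builders.h"
#include "llvm/ADT/SmallVector.h"

using namespace mlir;

// `linearId` is a subgroup id for a workgroup-level layout, or a lane id for
// a subgroup-level layout; `shape` is the problem size one level above.
static LogicalResult listCoords(OpBuilder &b, Location loc,
                                xegpu::DistributeLayoutAttr layout,
                                Value linearId, ArrayRef<int64_t> shape) {
  FailureOr<SmallVector<SmallVector<Value>>> coords =
      layout.computeDistributedCoords(b, loc, linearId, shape);
  if (failed(coords))
    return failure();
  // Each entry holds one set of per-dimension offsets assigned to this id
  // under round-robin distribution.
  return success();
}
```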
+    /// Delinearizes a linear ID into its multidimensional indices +    /// based on the effective level of the layout.      FailureOr<SmallVector<Value>> -    delinearizeSubgroupId(OpBuilder &builder, Location loc, Value linearId); +    delinearizeId(OpBuilder &builder, Location loc, Value linearId); -    /// Generates instructions to compute multidimensional offsets for blocks -    /// assigned to a subgroup identified by linearId. The shape parameter -    /// represents the workgroup-level problem size. Each subgroup may access +    /// Generates instructions to compute multidimensional coordinates for dist units +    /// assigned to a level identified by linearId. The shape parameter +    /// represents the higher-level problem size. Each `level` may access      /// multiple blocks according to round-robin distribution rules.      FailureOr<SmallVector<SmallVector<Value>>> -    getOffsets(OpBuilder &builder, Location loc, Value linearId, ArrayRef<int64_t> shape); +    computeDistributedCoords(OpBuilder &builder, Location loc, Value linearId, ArrayRef<int64_t> shape);      /// Check if this is slice of some other layout.      bool isSliceOf(const xegpu::DistributeLayoutAttr &other) { return false; } @@ -643,14 +643,15 @@ def XeGPU_SliceAttr : XeGPUAttr<"Slice", "slice", [DistributeLayoutAttr]> {      /// Delinearizes a linear subgroup ID into its multidimensional indices      /// based on the effective subgroup layout.      FailureOr<SmallVector<Value>> -    delinearizeSubgroupId(OpBuilder &builder, Location loc, Value linearId); +    delinearizeId(OpBuilder &builder, Location loc, Value linearId); -    /// Generates instructions to compute multidimensional offsets for blocks +    /// Generates instructions to compute multidimensional coordinates for blocks      /// assigned to a subgroup identified by linearId. The shape parameter      /// represents the workgroup-level problem size. Each subgroup may access      /// multiple blocks according to round-robin distribution rules. +      FailureOr<SmallVector<SmallVector<Value>>> -    getOffsets(OpBuilder &builder, Location loc, Value linearId, ArrayRef<int64_t> shape); +    computeDistributedCoords(OpBuilder &builder, Location loc, Value linearId, ArrayRef<int64_t> shape);      /// Check if this is slice of some other layout.      bool isSliceOf(const xegpu::DistributeLayoutAttr &other); diff --git a/mlir/include/mlir/Dialect/XeGPU/Transforms/Passes.td b/mlir/include/mlir/Dialect/XeGPU/Transforms/Passes.td index b7af541..eb05628 100644 --- a/mlir/include/mlir/Dialect/XeGPU/Transforms/Passes.td +++ b/mlir/include/mlir/Dialect/XeGPU/Transforms/Passes.td @@ -26,7 +26,7 @@ def XeGPUSubgroupDistribute : Pass<"xegpu-subgroup-distribute"> {      The pass distributes subgroup level (SIMD) XeGPU ops to work items.    
}];    let dependentDialects = ["memref::MemRefDialect", "xegpu::XeGPUDialect", -                           "vector::VectorDialect"]; +                           "vector::VectorDialect", "index::IndexDialect"];  }  def XeGPUPropagateLayout : Pass<"xegpu-propagate-layout"> { diff --git a/mlir/lib/Conversion/XeGPUToXeVM/XeGPUToXeVM.cpp b/mlir/lib/Conversion/XeGPUToXeVM/XeGPUToXeVM.cpp index 33e8f2e..de552ce 100644 --- a/mlir/lib/Conversion/XeGPUToXeVM/XeGPUToXeVM.cpp +++ b/mlir/lib/Conversion/XeGPUToXeVM/XeGPUToXeVM.cpp @@ -562,6 +562,8 @@ class LoadStoreMatrixToXeVMPattern : public OpConversionPattern<OpType> {      VectorType valOrResVecTy = dyn_cast<VectorType>(data.getType());      if (!valOrResVecTy)        valOrResVecTy = VectorType::get(1, data.getType()); +    if (valOrResVecTy.getShape().size() != 1) +      return rewriter.notifyMatchFailure(op, "Expected 1D data vector.");      int64_t elemBitWidth =          valOrResVecTy.getElementType().getIntOrFloatBitWidth(); diff --git a/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp b/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp index 4d2d873..3d1a734 100644 --- a/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp +++ b/mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp @@ -66,9 +66,10 @@ static Value getSupportedReduction(AffineForOp forOp, unsigned pos,            .Case([](arith::MaxSIOp) { return arith::AtomicRMWKind::maxs; })            .Case([](arith::MinUIOp) { return arith::AtomicRMWKind::minu; })            .Case([](arith::MaxUIOp) { return arith::AtomicRMWKind::maxu; }) +          .Case([](arith::XOrIOp) { return arith::AtomicRMWKind::xori; }) +          .Case([](arith::MaxNumFOp) { return arith::AtomicRMWKind::maxnumf; }) +          .Case([](arith::MinNumFOp) { return arith::AtomicRMWKind::minnumf; })            .Default([](Operation *) -> std::optional<arith::AtomicRMWKind> { -            // TODO: AtomicRMW supports other kinds of reductions this is -            // currently not detecting, add those when the need arises.              
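With `xori`, `maxnumf`, and `minnumf` now recognized both in the affine reduction detection above and in `vector::getVectorReductionOp` further down in this patch, a hedged sketch of the vectorizer-side call follows. The declaration is assumed to live in VectorOps.h with the argument order shown; the wrapper itself is illustrative.
```
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"

using namespace mlir;

// Turn a detected reduction kind into a single-lane value, e.g.
// AtomicRMWKind::xori becomes a vector.reduction with the XOR combining kind.
// For kinds that remain unsupported, an error is emitted and a null Value is
// returned.
static Value reduceVector(OpBuilder &b, Location loc, Value vec,
                          arith::AtomicRMWKind kind) {
  return vector::getVectorReductionOp(kind, b, loc, vec);
}
```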
return std::nullopt;            });    if (!maybeKind) diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp index a5ffb9e..12c8162 100644 --- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp +++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp @@ -365,6 +365,59 @@ LogicalResult ConvertF4x2ToF16x2Op::verify() {    return success();  } +//===----------------------------------------------------------------------===// +// Stochastic Rounding Conversion Ops +//===----------------------------------------------------------------------===// + +LogicalResult ConvertF32x2ToF16x2Op::verify() { +  if (getRnd() != FPRoundingMode::RS) +    return emitOpError("Only RS rounding mode is supported for " +                       "conversions from f32x2 to f16x2."); +  return success(); +} + +LogicalResult ConvertF32x2ToBF16x2Op::verify() { +  if (getRnd() != FPRoundingMode::RS) +    return emitOpError("Only RS rounding mode is supported for " +                       "conversions from f32x2 to bf16x2."); +  return success(); +} + +LogicalResult ConvertF32x4ToF8x4Op::verify() { +  mlir::MLIRContext *ctx = getContext(); + +  if (!llvm::isa<mlir::Float8E4M3FNType, mlir::Float8E5M2Type>(getDstTy())) +    return emitOpError("Only ") +           << mlir::Float8E4M3FNType::get(ctx) << " and " +           << mlir::Float8E5M2Type::get(ctx) +           << " types are supported for conversions from f32x4 to f8x4."; + +  return success(); +} + +LogicalResult ConvertF32x4ToF6x4Op::verify() { +  mlir::MLIRContext *ctx = getContext(); + +  if (!llvm::isa<mlir::Float6E2M3FNType, mlir::Float6E3M2FNType>(getDstTy())) +    return emitOpError("Only ") +           << mlir::Float6E2M3FNType::get(ctx) << " and " +           << mlir::Float6E3M2FNType::get(ctx) +           << " types are supported for conversions from f32x4 to f6x4."; + +  return success(); +} + +LogicalResult ConvertF32x4ToF4x4Op::verify() { +  mlir::MLIRContext *ctx = getContext(); + +  if (!llvm::isa<mlir::Float4E2M1FNType>(getDstTy())) +    return emitOpError("Only ") << mlir::Float4E2M1FNType::get(ctx) +                                << " type is supported for conversions from " +                                   "f32x4 to f4x4."; + +  return success(); +} +  LogicalResult BulkStoreOp::verify() {    if (getInitVal() != 0)      return emitOpError("only 0 is supported for initVal, got ") << getInitVal(); @@ -2469,6 +2522,85 @@ Tcgen05CommitOp::getIntrinsicIDAndArgs(Operation &op,      return TCGEN05_CP_2CTA(shape_mc, , is_2cta);                               \    }() +llvm::Intrinsic::ID ConvertF32x2ToF16x2Op::getIntrinsicID() { +  bool hasRelu = getRelu(); +  bool hasSatFinite = (getSat() == NVVM::SaturationMode::SATFINITE); + +  if (hasRelu && hasSatFinite) +    return llvm::Intrinsic::nvvm_ff2f16x2_rs_relu_satfinite; +  if (hasRelu) +    return llvm::Intrinsic::nvvm_ff2f16x2_rs_relu; +  if (hasSatFinite) +    return llvm::Intrinsic::nvvm_ff2f16x2_rs_satfinite; +  return llvm::Intrinsic::nvvm_ff2f16x2_rs; +} + +llvm::Intrinsic::ID ConvertF32x2ToBF16x2Op::getIntrinsicID() { +  bool hasRelu = getRelu(); +  bool hasSatFinite = (getSat() == NVVM::SaturationMode::SATFINITE); + +  if (hasRelu && hasSatFinite) +    return llvm::Intrinsic::nvvm_ff2bf16x2_rs_relu_satfinite; +  if (hasRelu) +    return llvm::Intrinsic::nvvm_ff2bf16x2_rs_relu; +  if (hasSatFinite) +    return llvm::Intrinsic::nvvm_ff2bf16x2_rs_satfinite; +  return llvm::Intrinsic::nvvm_ff2bf16x2_rs; +} + +llvm::Intrinsic::ID ConvertF32x4ToF8x4Op::getIntrinsicID() { 
+  mlir::Type dstTy = getDstTy(); +  bool hasRelu = getRelu(); + +  return llvm::TypeSwitch<mlir::Type, llvm::Intrinsic::ID>(dstTy) +      .Case<mlir::Float8E4M3FNType>([&](mlir::Float8E4M3FNType) { +        return hasRelu ? llvm::Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite +                       : llvm::Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite; +      }) +      .Case<mlir::Float8E5M2Type>([&](mlir::Float8E5M2Type) { +        return hasRelu ? llvm::Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite +                       : llvm::Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite; +      }) +      .Default([](mlir::Type) { +        llvm_unreachable("Invalid F8 type in ConvertF32x4ToF8x4Op"); +        return llvm::Intrinsic::not_intrinsic; +      }); +} + +llvm::Intrinsic::ID ConvertF32x4ToF6x4Op::getIntrinsicID() { +  mlir::Type dstTy = getDstTy(); +  bool hasRelu = getRelu(); + +  return llvm::TypeSwitch<mlir::Type, llvm::Intrinsic::ID>(dstTy) +      .Case<mlir::Float6E2M3FNType>([&](mlir::Float6E2M3FNType) { +        return hasRelu ? llvm::Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite +                       : llvm::Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite; +      }) +      .Case<mlir::Float6E3M2FNType>([&](mlir::Float6E3M2FNType) { +        return hasRelu ? llvm::Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite +                       : llvm::Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite; +      }) +      .Default([](mlir::Type) { +        llvm_unreachable("Invalid F6 type in ConvertF32x4ToF6x4Op"); +        return llvm::Intrinsic::not_intrinsic; +      }); +} + +llvm::Intrinsic::ID ConvertF32x4ToF4x4Op::getIntrinsicID() { +  mlir::Type dstTy = getDstTy(); +  bool hasRelu = getRelu(); + +  return llvm::TypeSwitch<mlir::Type, llvm::Intrinsic::ID>(dstTy) +      .Case<mlir::Float4E2M1FNType>([&](mlir::Float4E2M1FNType) { +        return hasRelu ? llvm::Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite +                       : llvm::Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite; +      }) +      .Default([](mlir::Type) { +        llvm_unreachable("Invalid F4 type in ConvertF32x4ToF4x4Op"); +        return llvm::Intrinsic::not_intrinsic; +      }); +} +  llvm::Intrinsic::ID Tcgen05CpOp::getIntrinsicID(Operation &op) {    auto curOp = cast<NVVM::Tcgen05CpOp>(op);    bool is2CTA = curOp.getGroup() == CTAGroupKind::CTA_2; diff --git a/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp b/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp index 35eba72..b2f1d84 100644 --- a/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp +++ b/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp @@ -3068,8 +3068,12 @@ LogicalResult acc::LoopOp::verify() {    if (getRegion().empty())      return emitError("expected non-empty body."); -  // When it is container-like - it is expected to hold a loop-like operation. -  if (isContainerLike()) { +  if (getUnstructured()) { +    if (!isContainerLike()) +      return emitError( +          "unstructured acc.loop must not have induction variables"); +  } else if (isContainerLike()) { +    // When it is container-like - it is expected to hold a loop-like operation.      // Obtain the maximum collapse count - we use this to check that there      // are enough loops contained.      
uint64_t collapseCount = getCollapseValue().value_or(1); diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp index ae3423c..daef0ba 100644 --- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp +++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp @@ -717,7 +717,15 @@ Value mlir::vector::getVectorReductionOp(arith::AtomicRMWKind op,    case arith::AtomicRMWKind::ori:      return vector::ReductionOp::create(builder, vector.getLoc(),                                         CombiningKind::OR, vector); -  // TODO: Add remaining reduction operations. +  case arith::AtomicRMWKind::minnumf: +    return vector::ReductionOp::create(builder, vector.getLoc(), +                                       CombiningKind::MINNUMF, vector); +  case arith::AtomicRMWKind::maxnumf: +    return vector::ReductionOp::create(builder, vector.getLoc(), +                                       CombiningKind::MAXNUMF, vector); +  case arith::AtomicRMWKind::xori: +    return vector::ReductionOp::create(builder, vector.getLoc(), +                                       CombiningKind::XOR, vector);    default:      (void)emitOptionalError(loc, "Reduction operation type not supported");      break; diff --git a/mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp b/mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp index 83406c8..397107b 100644 --- a/mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp +++ b/mlir/lib/Dialect/XeGPU/IR/XeGPUDialect.cpp @@ -37,55 +37,61 @@ void XeGPUDialect::initialize() {        >();  } -/// Generates instructions to compute offsets for a subgroup identified by -/// its multidimensional indices (sgId), using the specified subgroup layout -/// (sgLayout), subgroup data dimensions (sizePerSg), and the overall data -/// dimensions (sizePerWg). +// A `srcShape` consists of N distribution units, each being `subShapesLayout` x +// `subShape`. A `delinearizedId` is used to identify a particular `subShape` +// within each distribution unit. +// Example: +// WG data is 128x256. SG data is 16x32, in 4x2 layout, this gives a +// distribution unit of shape 64x64, we have 2x4 such distribution units. +// `delinearizedId` is used to identify a 16x32 of a subgroup in each +// distribution unit.  static SmallVector<SmallVector<Value>> -genOffsetsComputingInsts(OpBuilder &builder, Location loc, -                         SmallVector<Value> sgId, ArrayRef<int64_t> sgLayout, -                         ArrayRef<int64_t> sizePerSg, -                         ArrayRef<int64_t> sizePerWg) { - -  SmallVector<SmallVector<Value>> offsets; +genCoordinates(OpBuilder &builder, Location loc, +               SmallVector<Value> delinearizedId, +               ArrayRef<int64_t> subShapesLayout, ArrayRef<int64_t> subShape, +               ArrayRef<int64_t> srcShape) { +  SmallVector<SmallVector<Value>> coordinates; + +  // A distribution unit must be less than or equal to `srcShape` +  SmallVector<int64_t> distUnitShape = llvm::map_to_vector( +      llvm::zip_equal(srcShape, +                      computeElementwiseMul(subShapesLayout, subShape)), +      [](const auto &t) { return std::min(std::get<0>(t), std::get<1>(t)); }); -  // nd local offset, localOffset[i] = sgId[i] * sizePerSg[i] -  SmallVector<Value> localOffsets = llvm::map_to_vector( -      llvm::zip(sgId, sizePerSg), [&](const auto &t) -> Value { +  // Get the offset of `subShape` within a distribution unit. 
+  SmallVector<Value> distUnitLocalOffset = llvm::map_to_vector( +      llvm::zip(delinearizedId, subShape), [&](const auto &t) -> Value {          return builder.createOrFold<index::MulOp>(              loc, std::get<0>(t),              builder.createOrFold<arith::ConstantIndexOp>(loc, std::get<1>(t)));        }); -  // distUnit[i] is the minimum value between sizePerWg[i] and -  // sgLayout[i] * sizePerSg[i] -  SmallVector<int64_t> distUnit = llvm::map_to_vector( -      llvm::zip_equal(sizePerWg, computeElementwiseMul(sgLayout, sizePerSg)), -      [](const auto &t) { return std::min(std::get<0>(t), std::get<1>(t)); }); - +  // For each dist unit    for (SmallVector<int64_t> unitOffs : -       StaticTileOffsetRange(sizePerWg, distUnit)) { +       StaticTileOffsetRange(srcShape, distUnitShape)) { +    // Get dist unit offset within `srcShape`.      SmallVector<Value> base =          llvm::map_to_vector(unitOffs, [&](int64_t d) -> Value {            return arith::ConstantIndexOp::create(builder, loc, d);          }); - -    SmallVector<Value> adds = llvm::map_to_vector( -        llvm::zip_equal(base, localOffsets), [&](const auto &t) -> Value { -          return builder.createOrFold<arith::AddIOp>(loc, std::get<0>(t), -                                                     std::get<1>(t)); -        }); - +    // Calculate `subShape` offset within `srcShape`. +    SmallVector<Value> adds = +        llvm::map_to_vector(llvm::zip_equal(base, distUnitLocalOffset), +                            [&](const auto &t) -> Value { +                              return builder.createOrFold<arith::AddIOp>( +                                  loc, std::get<0>(t), std::get<1>(t)); +                            }); +    // Do not go beyond `srcShape` bounds.      SmallVector<Value> mods = llvm::map_to_vector( -        llvm::zip_equal(adds, sizePerWg), [&](const auto &t) -> Value { +        llvm::zip_equal(adds, srcShape), [&](const auto &t) -> Value {            return builder.createOrFold<index::RemUOp>(                loc, std::get<0>(t),                arith::ConstantIndexOp::create(builder, loc, std::get<1>(t)));          }); -    offsets.push_back(mods); +    coordinates.push_back(mods);    } -  return offsets; +  return coordinates;  }  // Checks if the given shape can be evenly distributed based on the layout @@ -272,12 +278,7 @@ LayoutAttr::verify(llvm::function_ref<mlir::InFlightDiagnostic()> emitError,  }  FailureOr<SmallVector<Value>> -LayoutAttr::delinearizeSubgroupId(OpBuilder &builder, Location loc, -                                  Value linearId) { -  // delinearizeSubgroupId is only available for -  // workgroup-level layout attribute -  if (!isForWorkgroup()) -    return failure(); +LayoutAttr::delinearizeId(OpBuilder &builder, Location loc, Value linearId) {    // TODO: handle order attribute    auto hasDefaultOrder = [&]() { @@ -287,41 +288,52 @@ LayoutAttr::delinearizeSubgroupId(OpBuilder &builder, Location loc,    };    if (!hasDefaultOrder())      return mlir::emitError(loc, "order attribute is currently not supported."); - -  auto dims = -      llvm::map_to_vector(getEffectiveSgLayoutAsInt(), [&](int64_t d) -> Value { -        return builder.createOrFold<arith::ConstantIndexOp>(loc, d); -      }); +  SmallVector<int64_t> layout; +  if (isForWorkgroup()) { +    layout = getEffectiveSgLayoutAsInt(); +  } else if (isForSubgroup()) { +    layout = getEffectiveLaneLayoutAsInt(); +  } else { +    return failure(); +  } +  auto dims = llvm::map_to_vector(layout, [&](int64_t d) -> Value { +    
return builder.createOrFold<arith::ConstantIndexOp>(loc, d); +  });    return affine::delinearizeIndex(builder, loc, linearId, dims);  } -/// Implements DistributeLayoutAttr::getOffsets to generate +/// Implements DistributeLayoutAttr::computeDistributedCoords to generate  /// instructions for computing multi-dimensional offsets when distributed by  /// LayoutAttr.  FailureOr<SmallVector<SmallVector<Value>>> -LayoutAttr::getOffsets(OpBuilder &builder, Location loc, Value linearId, -                       ArrayRef<int64_t> shape) { -  if (!isForWorkgroup()) +LayoutAttr::computeDistributedCoords(OpBuilder &builder, Location loc, +                                     Value linearId, ArrayRef<int64_t> shape) { +  SmallVector<int64_t> layout; +  SmallVector<int64_t> subShape; +  if (isForWorkgroup()) { +    layout = getEffectiveSgLayoutAsInt(); +    subShape = getEffectiveSgDataAsInt(); +  } else if (isForSubgroup()) { +    layout = getEffectiveLaneLayoutAsInt(); +    subShape = getEffectiveLaneDataAsInt(); +  } else {      return failure(); - -  SmallVector<int64_t> sgLayout = getEffectiveSgLayoutAsInt(); -  SmallVector<int64_t> sgShape = getEffectiveSgDataAsInt(); -  if (sgShape.empty()) { -    if (auto derivedShape = computeShapeRatio(shape, sgLayout)) -      sgShape = derivedShape.value(); +  } +  if (subShape.empty()) { +    if (auto derivedShape = computeShapeRatio(shape, layout)) +      subShape = derivedShape.value();      else        return failure();    }    // delinearize Ids -  auto maybeIds = delinearizeSubgroupId(builder, loc, linearId); +  auto maybeIds = delinearizeId(builder, loc, linearId);    if (failed(maybeIds))      return failure(); -  SmallVector<Value> sgIds = *maybeIds; +  SmallVector<Value> ids = *maybeIds; -  return genOffsetsComputingInsts(builder, loc, sgIds, sgLayout, sgShape, -                                  shape); +  return genCoordinates(builder, loc, ids, layout, subShape, shape);  }  //===----------------------------------------------------------------------===// @@ -375,34 +387,43 @@ SliceAttr SliceAttr::flatten() const {  }  FailureOr<SmallVector<Value>> -SliceAttr::delinearizeSubgroupId(OpBuilder &builder, Location loc, -                                 Value linearId) { +SliceAttr::delinearizeId(OpBuilder &builder, Location loc, Value linearId) {    SliceAttr attr = flatten();    auto parent = dyn_cast<LayoutAttr>(attr.getParent()); -  return parent.delinearizeSubgroupId(builder, loc, linearId); +  return parent.delinearizeId(builder, loc, linearId);  } -/// Implements DistributeLayoutAttr::getOffsets to generate -/// instructions for computing multi-dimensional offsets when distributed by -/// SliceAttr. +// Implements DistributeLayoutAttr::computeDistributedCoords to generate +// instructions for computing multi-dimensional offsets when distributed by +// LayoutAttr.  
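To make the round-robin coordinate computation concrete, here is a small standalone sketch that reproduces the numbers from the `genCoordinates` comment (128x256 workgroup data, 16x32 subgroup tiles in a 4x2 layout). It mirrors only the arithmetic, not the IR-building code, and the iteration order is illustrative.
```
#include <algorithm>
#include <array>
#include <cstdio>

int main() {
  const std::array<int, 2> srcShape{128, 256}; // workgroup-level data
  const std::array<int, 2> subShape{16, 32};   // per-subgroup tile
  const std::array<int, 2> layout{4, 2};       // subgroup layout
  std::array<int, 2> distUnit;
  for (int i = 0; i < 2; ++i)
    distUnit[i] = std::min(srcShape[i], layout[i] * subShape[i]); // 64 x 64
  // 128x256 split into 64x64 units gives the 2x4 distribution units mentioned
  // in the comment. A subgroup with delinearized id (3, 1) owns local offset
  // (48, 32) inside every unit:
  const int sgId[2] = {3, 1};
  for (int u0 = 0; u0 < srcShape[0]; u0 += distUnit[0])
    for (int u1 = 0; u1 < srcShape[1]; u1 += distUnit[1])
      std::printf("(%d, %d)\n", (u0 + sgId[0] * subShape[0]) % srcShape[0],
                  (u1 + sgId[1] * subShape[1]) % srcShape[1]);
  // Prints (48, 32), (48, 96), (48, 160), (48, 224), (112, 32), ... (112, 224).
}
```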
FailureOr<SmallVector<SmallVector<Value>>> -SliceAttr::getOffsets(OpBuilder &builder, Location loc, Value linearId, -                      ArrayRef<int64_t> shape) { +SliceAttr::computeDistributedCoords(OpBuilder &builder, Location loc, +                                    Value linearId, ArrayRef<int64_t> shape) {    assert(getRank() == static_cast<int64_t>(shape.size()) && "invalid shape.");    if (!isForWorkgroup())      return failure(); -  SmallVector<int64_t> sgLayout = getEffectiveSgLayoutAsInt(); -  SmallVector<int64_t> sgShape = getEffectiveSgDataAsInt(); -  if (sgShape.empty()) { -    if (auto derivedShape = computeShapeRatio(shape, sgLayout)) -      sgShape = derivedShape.value(); +  SmallVector<int64_t> layout; +  SmallVector<int64_t> subShape; +  if (isForWorkgroup()) { +    layout = getEffectiveSgLayoutAsInt(); +    subShape = getEffectiveSgDataAsInt(); +  } else if (isForSubgroup()) { +    layout = getEffectiveLaneLayoutAsInt(); +    subShape = getEffectiveLaneDataAsInt(); +  } else { +    return failure(); +  } + +  if (subShape.empty()) { +    if (auto derivedShape = computeShapeRatio(shape, layout)) +      subShape = derivedShape.value();      else        return failure();    }    // delinearize Ids -  auto maybeIds = delinearizeSubgroupId(builder, loc, linearId); +  auto maybeIds = delinearizeId(builder, loc, linearId);    if (failed(maybeIds))      return failure(); @@ -412,8 +433,7 @@ SliceAttr::getOffsets(OpBuilder &builder, Location loc, Value linearId,    SmallVector<Value> sgIds =        XeGPUDialect::slice(ArrayRef<Value>(*maybeIds), dims); -  return genOffsetsComputingInsts(builder, loc, sgIds, sgLayout, sgShape, -                                  shape); +  return genCoordinates(builder, loc, sgIds, layout, subShape, shape);  }  bool SliceAttr::isSliceOf(const xegpu::DistributeLayoutAttr &other) { diff --git a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp index abd12e2..7b6c4b6 100644 --- a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp +++ b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp @@ -175,13 +175,13 @@ isValidGatherScatterBufferParams(Type offsetsTy, Type maskTy,  LogicalResult  IsValidMatrixOpParams(VectorType dataTy, MemDescType mdescTy, -                      UnitAttr subgroup_block_io, +                      UnitAttr subgroup_block_io, DistributeLayoutAttr layout,                        function_ref<InFlightDiagnostic()> emitError) {    if (!dataTy) {      if (subgroup_block_io)        return emitError() << "subgroup_block_io " -                            "are only allowed when result is a 1D VectorType."; +                            "are only allowed when result is a VectorType.";      else        return success();    } @@ -192,15 +192,37 @@ IsValidMatrixOpParams(VectorType dataTy, MemDescType mdescTy,    ArrayRef<int64_t> dataShape = dataTy.getShape();    ArrayRef<int64_t> mdescShape = mdescTy.getShape(); +  SmallVector<int64_t> blockShape = mdescTy.getBlockShape(); +  ArrayAttr strideAttr = mdescTy.getStrideAttr(); +  SmallVector<int64_t> strides; +  for (Attribute attr : strideAttr.getValue()) { +    strides.push_back(cast<IntegerAttr>(attr).getInt()); +  } +  if (subgroup_block_io && layout) { +    auto laneData = layout.getEffectiveLaneDataAsInt(); +    auto laneLayout = layout.getEffectiveLaneLayoutAsInt(); +    if (!laneData.empty()) { +      bool isLaneDataContiguous = +          std::all_of(laneData.begin(), std::prev(laneData.end()), +                      [](int x) { return x == 1; }); +      if (!isLaneDataContiguous) 
+        return emitError() << "With subgroup_block_io, accessed data must be " +                              "contiguous and coalesced."; +      for (size_t i = 0; i < laneData.size(); ++i) { +        if (laneLayout[i] != blockShape[i]) +          return emitError() << "With subgroup_block_io, the block shape must " +                                "match the lane layout."; +        if (laneLayout[i] != 1 && strides[i] != 1) +          return emitError() << "With subgroup_block_io, the distributed " +                                "dimensions must be contiguous."; +      } +    } +  }    if (dataShape.size() == 2) { -    if (subgroup_block_io) -      return emitError() << "subgroup_block_io " -                            "are only allowed when result is a 1D VectorType.";      if (llvm::any_of(llvm::zip_equal(dataShape, mdescShape),                       [](auto p) { return std::get<0>(p) > std::get<1>(p); }))        return emitError() << "data shape must not exceed mem_desc shape.";    } else { -    SmallVector<int64_t> blockShape = mdescTy.getBlockShape();      // if the subgroup_block_io attribute is set,  mdescTy must have block      // attribute      if (subgroup_block_io && !blockShape.size()) @@ -1105,7 +1127,7 @@ LogicalResult LoadMatrixOp::verify() {    MemDescType mdescTy = getMemDesc().getType();    return IsValidMatrixOpParams(resTy, mdescTy, subgroup_block_io, -                               [&]() { return emitError(); }); +                               getLayoutAttr(), [&]() { return emitError(); });  }  //===----------------------------------------------------------------------===// @@ -1129,7 +1151,7 @@ LogicalResult StoreMatrixOp::verify() {    UnitAttr subgroup_block_io = getSubgroupBlockIoAttr();    MemDescType mdescTy = getMemDesc().getType();    return IsValidMatrixOpParams(dataTy, mdescTy, subgroup_block_io, -                               [&]() { return emitError(); }); +                               getLayoutAttr(), [&]() { return emitError(); });  }  namespace mlir { diff --git a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUSubgroupDistribute.cpp b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUSubgroupDistribute.cpp index 5a3b27e..bbd7733 100644 --- a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUSubgroupDistribute.cpp +++ b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUSubgroupDistribute.cpp @@ -7,6 +7,7 @@  //===----------------------------------------------------------------------===//  #include "mlir/Dialect/GPU/IR/GPUDialect.h"  #include "mlir/Dialect/GPU/Utils/DistributionUtils.h" +#include "mlir/Dialect/Index/IR/IndexDialect.h"  #include "mlir/Dialect/MemRef/IR/MemRef.h"  #include "mlir/Dialect/Vector/IR/VectorOps.h"  #include "mlir/Dialect/Vector/Transforms/VectorDistribution.h" @@ -912,6 +913,186 @@ struct StoreDistribution final : public gpu::WarpDistributionPattern {    }  }; +static SmallVector<Value> computeDistributedCoordinatesForMatrixOp( +    PatternRewriter &rewriter, Location loc, xegpu::DistributeLayoutAttr layout, +    Value laneId, ArrayRef<int64_t> payloadShape, ValueRange origOffsets) { +  SmallVector<Value> newCoods; +  auto maybeCoords = +      layout.computeDistributedCoords(rewriter, loc, laneId, payloadShape); +  if (failed(maybeCoords)) +    return {}; +  assert(maybeCoords.value().size() == 1 && +         "Expected one set of distributed offsets"); +  SmallVector<OpFoldResult> ofrVec = xegpu::addWithRightAligned( +      rewriter, loc, getAsOpFoldResult(maybeCoords.value()[0]), +      getAsOpFoldResult(origOffsets)); +  newCoods = 
llvm::to_vector(llvm::map_range( +      ofrVec, [&](OpFoldResult ofr) -> Value { return cast<Value>(ofr); })); +  return newCoods; +} + +/// Pattern for distributing xegpu::LoadMatrixOp. +struct LoadMatrixDistribution final : public gpu::WarpDistributionPattern { +  using gpu::WarpDistributionPattern::WarpDistributionPattern; +  LogicalResult matchAndRewrite(gpu::WarpExecuteOnLane0Op warpOp, +                                PatternRewriter &rewriter) const override { +    gpu::YieldOp yield = warpOp.getTerminator(); +    Operation *lastNode = yield->getPrevNode(); +    auto matrixOp = dyn_cast_or_null<xegpu::LoadMatrixOp>(lastNode); +    if (!matrixOp) +      return failure(); + +    OpOperand *producedByLastLoad = getWarpResult(warpOp, [&](Operation *op) { +      return isa<xegpu::LoadMatrixOp>(op) && matrixOp == op; +    }); +    if (!producedByLastLoad) +      return rewriter.notifyMatchFailure( +          warpOp, "The last op is not xegpu::LoadMatrixOp"); +    const int operandIdx = producedByLastLoad->getOperandNumber(); + +    VectorType sgPayloadTy = +        dyn_cast<VectorType>(matrixOp.getResult().getType()); +    VectorType warpResultTy = +        cast<VectorType>(warpOp.getResult(operandIdx).getType()); +    if (!sgPayloadTy) +      return rewriter.notifyMatchFailure( +          matrixOp, "the matrix op payload must be a vector type"); + +    auto loc = matrixOp.getLoc(); +    auto offsets = matrixOp.getMixedOffsets(); +    if (offsets.empty()) +      return rewriter.notifyMatchFailure(matrixOp, +                                         "the load op must have offsets"); +    SmallVector<Value> offsetsAsValues = +        vector::getAsValues(rewriter, matrixOp.getLoc(), offsets); + +    auto layout = matrixOp.getLayoutAttr(); +    if (!layout) +      return rewriter.notifyMatchFailure( +          matrixOp, "the matrix operation lacks layout attribute"); + +    FailureOr<VectorType> distPayloadByWarpOpOrFailure = +        getDistVecTypeBasedOnLaneLayout(layout, sgPayloadTy); +    if (failed(distPayloadByWarpOpOrFailure)) +      return rewriter.notifyMatchFailure( +          matrixOp, "Failed to distribute matrix op payload based on layout."); + +    SmallVector<Value> operands = {matrixOp.getMemDesc()}; +    const unsigned offsetsStartIdx = operands.size(); +    operands.append(offsetsAsValues); + +    SmallVector<Type> operandTypes = llvm::to_vector( +        llvm::map_range(operands, [](Value v) { return v.getType(); })); + +    SmallVector<size_t> newRetIndices; +    gpu::WarpExecuteOnLane0Op newWarpOp = moveRegionToNewWarpOpAndAppendReturns( +        rewriter, warpOp, operands, operandTypes, newRetIndices); +    SmallVector<Value> newOperands = llvm::map_to_vector( +        newRetIndices, [&](size_t idx) { return newWarpOp.getResult(idx); }); + +    SmallVector<int64_t> newConstOffsets{matrixOp.getConstOffsets()}; +    std::fill(newConstOffsets.begin(), newConstOffsets.end(), +              ShapedType::kDynamic); +    DenseI64ArrayAttr newConstOffsetsAttr = +        rewriter.getDenseI64ArrayAttr(newConstOffsets); +    ValueRange currentOffsets = +        ValueRange(newOperands).drop_front(offsetsStartIdx); + +    SmallVector<Value> newCoords = currentOffsets; +    rewriter.setInsertionPointAfter(newWarpOp); + +    if (!matrixOp.getSubgroupBlockIoAttr()) { +      newCoords = computeDistributedCoordinatesForMatrixOp( +          rewriter, loc, layout, newWarpOp.getLaneid(), sgPayloadTy.getShape(), +          currentOffsets); +    } +    xegpu::LoadMatrixOp newOp = 
xegpu::LoadMatrixOp::create( +        rewriter, newWarpOp.getLoc(), *distPayloadByWarpOpOrFailure, +        newOperands[0], ValueRange(newCoords), newConstOffsetsAttr, +        matrixOp.getSubgroupBlockIoAttr(), xegpu::DistributeLayoutAttr{}); +    // Resolve the output type and replace all uses. +    rewriter.replaceAllUsesWith( +        newWarpOp.getResult(operandIdx), +        resolveDistributedTy(newOp.getResult(), warpResultTy, rewriter)); +    return success(); +  } +}; + +/// Pattern for distributing xegpu::StoreMatrixOp. +struct StoreMatrixDistribution final : public gpu::WarpDistributionPattern { +  using gpu::WarpDistributionPattern::WarpDistributionPattern; +  LogicalResult matchAndRewrite(gpu::WarpExecuteOnLane0Op warpOp, +                                PatternRewriter &rewriter) const override { +    gpu::YieldOp yield = warpOp.getTerminator(); +    Operation *lastNode = yield->getPrevNode(); +    auto matrixOp = dyn_cast_or_null<xegpu::StoreMatrixOp>(lastNode); +    if (!matrixOp) +      return failure(); + +    VectorType sgPayloadTy = dyn_cast<VectorType>(matrixOp.getData().getType()); +    if (!sgPayloadTy) +      return rewriter.notifyMatchFailure( +          matrixOp, "the matrix op payload must be a vector type"); + +    auto loc = matrixOp.getLoc(); +    auto offsets = matrixOp.getMixedOffsets(); +    if (offsets.empty()) +      return rewriter.notifyMatchFailure(matrixOp, +                                         "the store op must have offsets"); +    SmallVector<Value> offsetsAsValues = +        vector::getAsValues(rewriter, matrixOp.getLoc(), offsets); + +    auto layout = matrixOp.getLayoutAttr(); +    if (!layout) +      return rewriter.notifyMatchFailure( +          matrixOp, "the matrix operation lacks layout attribute"); + +    FailureOr<VectorType> distPayloadByWarpOpOrFailure = +        getDistVecTypeBasedOnLaneLayout(layout, sgPayloadTy); +    if (failed(distPayloadByWarpOpOrFailure)) +      return rewriter.notifyMatchFailure( +          matrixOp, "Failed to distribute matrix op payload based on layout."); + +    SmallVector<Value> operands = {matrixOp.getData(), matrixOp.getMemDesc()}; +    const unsigned offsetsStartIdx = operands.size(); +    operands.append(offsetsAsValues); + +    SmallVector<Type> operandTypes = llvm::to_vector( +        llvm::map_range(operands, [](Value v) { return v.getType(); })); +    operandTypes[0] = *distPayloadByWarpOpOrFailure; + +    SmallVector<size_t> newRetIndices; +    gpu::WarpExecuteOnLane0Op newWarpOp = moveRegionToNewWarpOpAndAppendReturns( +        rewriter, warpOp, operands, operandTypes, newRetIndices); +    SmallVector<Value> newOperands = llvm::map_to_vector( +        newRetIndices, [&](size_t idx) { return newWarpOp.getResult(idx); }); + +    SmallVector<int64_t> newConstOffsets{matrixOp.getConstOffsets()}; +    std::fill(newConstOffsets.begin(), newConstOffsets.end(), +              ShapedType::kDynamic); +    DenseI64ArrayAttr newConstOffsetsAttr = +        rewriter.getDenseI64ArrayAttr(newConstOffsets); +    ValueRange currentOffsets = +        ValueRange(newOperands).drop_front(offsetsStartIdx); + +    SmallVector<Value> newCoords = currentOffsets; +    rewriter.setInsertionPointAfter(newWarpOp); + +    if (!matrixOp.getSubgroupBlockIoAttr()) { +      newCoords = computeDistributedCoordinatesForMatrixOp( +          rewriter, loc, layout, newWarpOp.getLaneid(), sgPayloadTy.getShape(), +          currentOffsets); +    } + +    xegpu::StoreMatrixOp::create( +        rewriter, loc, TypeRange{}, 
newOperands[0], newOperands[1], +        ValueRange(newCoords), newConstOffsetsAttr, +        matrixOp.getSubgroupBlockIoAttr(), xegpu::DistributeLayoutAttr{}); +    rewriter.eraseOp(matrixOp); +    return success(); +  } +}; +  /// Distribute a scattered load op. The logic and requirements are the same as  /// for the scattered store distribution. The warpOp's payload vector is  /// expected to be distributed by the load's result consumer. @@ -1443,7 +1624,8 @@ void xegpu::populateXeGPUSubgroupDistributePatterns(                 LoadNdDistribution, DpasDistribution, PrefetchNdDistribution,                 GpuBarrierDistribution, VectorMultiReductionDistribution,                 LoadDistribution, StoreDistribution, VectorTransposeDistribution, -               VectorBitcastDistribution, +               VectorBitcastDistribution, LoadMatrixDistribution, +               StoreMatrixDistribution,                 MemrefExtractAlignedPointerAsIndexDistribution>(        patterns.getContext(),        /*pattern benefit=*/regularPatternBenefit); @@ -1468,6 +1650,8 @@ void XeGPUSubgroupDistributePass::runOnOperation() {        // Layouts are needed for vector type only.        if (!isa<VectorType>(operand.get().getType()))          continue; +      if (isa<xegpu::LoadMatrixOp, xegpu::StoreMatrixOp>(op)) +        continue;        auto layout = xegpu::getDistributeLayoutAttr(operand.get());        if (!layout) { diff --git a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp index 9fc5ad9..79eea55 100644 --- a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp +++ b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUWgToSgDistribute.cpp @@ -114,7 +114,8 @@ genOffsetsList(ConversionPatternRewriter &rewriter, OpType op,    // Compute the list of subgroup-relative offsets for sub-tensors or sub-memory    // descriptors to be accessed, based on the layout information.    ArrayRef<int64_t> wgShape = op.getDataShape(); -  auto maybeDescOffsets = layout.getOffsets(rewriter, loc, sgId, wgShape); +  auto maybeDescOffsets = +      layout.computeDistributedCoords(rewriter, loc, sgId, wgShape);    if (failed(maybeDescOffsets))      return failure(); @@ -830,8 +831,8 @@ struct WgToSgArithConstantOp : public OpConversionPattern<arith::ConstantOp> {        // Get subgroup id        Value sgId =            gpu::SubgroupIdOp::create(rewriter, loc, /*upper_bound=*/nullptr); - -      auto sgOffsets = layout.getOffsets(rewriter, loc, sgId, wgShape); +      auto sgOffsets = +          layout.computeDistributedCoords(rewriter, loc, sgId, wgShape);        if (failed(sgOffsets))          return failure(); @@ -1052,7 +1053,8 @@ struct WgToSgVectorStepOp : public OpConversionPattern<vector::StepOp> {      Value sgId =          gpu::SubgroupIdOp::create(rewriter, loc, /*upper_bound=*/nullptr); -    auto sgOffsets = layout.getOffsets(rewriter, loc, sgId, wgShape); +    auto sgOffsets = +        layout.computeDistributedCoords(rewriter, loc, sgId, wgShape);      if (failed(sgOffsets))        return failure(); diff --git a/mlir/lib/Transforms/Utils/DialectConversion.cpp b/mlir/lib/Transforms/Utils/DialectConversion.cpp index 3a23bbf..2fe0697 100644 --- a/mlir/lib/Transforms/Utils/DialectConversion.cpp +++ b/mlir/lib/Transforms/Utils/DialectConversion.cpp @@ -1105,10 +1105,6 @@ struct ConversionPatternRewriterImpl : public RewriterBase::Listener {    /// A set of operations that were modified by the current pattern.    
SetVector<Operation *> patternModifiedOps; -  /// A set of blocks that were inserted (newly-created blocks or moved blocks) -  /// by the current pattern. -  SetVector<Block *> patternInsertedBlocks; -    /// A list of unresolved materializations that were created by the current    /// pattern.    DenseSet<UnrealizedConversionCastOp> patternMaterializations; @@ -2046,8 +2042,6 @@ void ConversionPatternRewriterImpl::notifyBlockInserted(    if (!config.allowPatternRollback && config.listener)      config.listener->notifyBlockInserted(block, previous, previousIt); -  patternInsertedBlocks.insert(block); -    if (wasDetached) {      // If the block was detached, it is most likely a newly created block.      if (config.allowPatternRollback) { @@ -2399,17 +2393,12 @@ private:    bool canApplyPattern(Operation *op, const Pattern &pattern);    /// Legalize the resultant IR after successfully applying the given pattern. -  LogicalResult legalizePatternResult(Operation *op, const Pattern &pattern, -                                      const RewriterState &curState, -                                      const SetVector<Operation *> &newOps, -                                      const SetVector<Operation *> &modifiedOps, -                                      const SetVector<Block *> &insertedBlocks); - -  /// Legalizes the actions registered during the execution of a pattern.    LogicalResult -  legalizePatternBlockRewrites(Operation *op, -                               const SetVector<Block *> &insertedBlocks, -                               const SetVector<Operation *> &newOps); +  legalizePatternResult(Operation *op, const Pattern &pattern, +                        const RewriterState &curState, +                        const SetVector<Operation *> &newOps, +                        const SetVector<Operation *> &modifiedOps); +    LogicalResult    legalizePatternCreatedOperations(const SetVector<Operation *> &newOps);    LogicalResult @@ -2608,7 +2597,6 @@ LogicalResult OperationLegalizer::legalizeWithFold(Operation *op) {    auto cleanup = llvm::make_scope_exit([&]() {      rewriterImpl.patternNewOps.clear();      rewriterImpl.patternModifiedOps.clear(); -    rewriterImpl.patternInsertedBlocks.clear();    });    // Upon failure, undo all changes made by the folder. @@ -2662,24 +2650,16 @@ LogicalResult OperationLegalizer::legalizeWithFold(Operation *op) {  static void  reportNewIrLegalizationFatalError(const Pattern &pattern,                                    const SetVector<Operation *> &newOps, -                                  const SetVector<Operation *> &modifiedOps, -                                  const SetVector<Block *> &insertedBlocks) { +                                  const SetVector<Operation *> &modifiedOps) {    auto newOpNames = llvm::map_range(        newOps, [](Operation *op) { return op->getName().getStringRef(); });    auto modifiedOpNames = llvm::map_range(        modifiedOps, [](Operation *op) { return op->getName().getStringRef(); }); -  StringRef detachedBlockStr = "(detached block)"; -  auto insertedBlockNames = llvm::map_range(insertedBlocks, [&](Block *block) { -    if (block->getParentOp()) -      return block->getParentOp()->getName().getStringRef(); -    return detachedBlockStr; -  }); -  llvm::report_fatal_error( -      "pattern '" + pattern.getDebugName() + -      "' produced IR that could not be legalized. 
" + "new ops: {" + -      llvm::join(newOpNames, ", ") + "}, " + "modified ops: {" + -      llvm::join(modifiedOpNames, ", ") + "}, " + "inserted block into ops: {" + -      llvm::join(insertedBlockNames, ", ") + "}"); +  llvm::report_fatal_error("pattern '" + pattern.getDebugName() + +                           "' produced IR that could not be legalized. " + +                           "new ops: {" + llvm::join(newOpNames, ", ") + "}, " + +                           "modified ops: {" + +                           llvm::join(modifiedOpNames, ", ") + "}");  }  LogicalResult OperationLegalizer::legalizeWithPattern(Operation *op) { @@ -2743,7 +2723,6 @@ LogicalResult OperationLegalizer::legalizeWithPattern(Operation *op) {      }      rewriterImpl.patternNewOps.clear();      rewriterImpl.patternModifiedOps.clear(); -    rewriterImpl.patternInsertedBlocks.clear();      LLVM_DEBUG({        logFailure(rewriterImpl.logger, "pattern failed to match");        if (rewriterImpl.config.notifyCallback) { @@ -2777,15 +2756,12 @@ LogicalResult OperationLegalizer::legalizeWithPattern(Operation *op) {      SetVector<Operation *> newOps = moveAndReset(rewriterImpl.patternNewOps);      SetVector<Operation *> modifiedOps =          moveAndReset(rewriterImpl.patternModifiedOps); -    SetVector<Block *> insertedBlocks = -        moveAndReset(rewriterImpl.patternInsertedBlocks); -    auto result = legalizePatternResult(op, pattern, curState, newOps, -                                        modifiedOps, insertedBlocks); +    auto result = +        legalizePatternResult(op, pattern, curState, newOps, modifiedOps);      appliedPatterns.erase(&pattern);      if (failed(result)) {        if (!rewriterImpl.config.allowPatternRollback) -        reportNewIrLegalizationFatalError(pattern, newOps, modifiedOps, -                                          insertedBlocks); +        reportNewIrLegalizationFatalError(pattern, newOps, modifiedOps);        rewriterImpl.resetState(curState, pattern.getDebugName());      }      if (config.listener) @@ -2823,8 +2799,7 @@ bool OperationLegalizer::canApplyPattern(Operation *op,  LogicalResult OperationLegalizer::legalizePatternResult(      Operation *op, const Pattern &pattern, const RewriterState &curState,      const SetVector<Operation *> &newOps, -    const SetVector<Operation *> &modifiedOps, -    const SetVector<Block *> &insertedBlocks) { +    const SetVector<Operation *> &modifiedOps) {    [[maybe_unused]] auto &impl = rewriter.getImpl();    assert(impl.pendingRootUpdates.empty() && "dangling root updates"); @@ -2843,8 +2818,7 @@ LogicalResult OperationLegalizer::legalizePatternResult(  #endif // MLIR_ENABLE_EXPENSIVE_PATTERN_API_CHECKS    // Legalize each of the actions registered during application. -  if (failed(legalizePatternBlockRewrites(op, insertedBlocks, newOps)) || -      failed(legalizePatternRootUpdates(modifiedOps)) || +  if (failed(legalizePatternRootUpdates(modifiedOps)) ||        failed(legalizePatternCreatedOperations(newOps))) {      return failure();    } @@ -2853,53 +2827,6 @@ LogicalResult OperationLegalizer::legalizePatternResult(    return success();  } -LogicalResult OperationLegalizer::legalizePatternBlockRewrites( -    Operation *op, const SetVector<Block *> &insertedBlocks, -    const SetVector<Operation *> &newOps) { -  ConversionPatternRewriterImpl &impl = rewriter.getImpl(); -  SmallPtrSet<Operation *, 16> alreadyLegalized; - -  // If the pattern moved or created any blocks, make sure the types of block -  // arguments get legalized. 
-  for (Block *block : insertedBlocks) { -    if (impl.erasedBlocks.contains(block)) -      continue; - -    // Only check blocks outside of the current operation. -    Operation *parentOp = block->getParentOp(); -    if (!parentOp || parentOp == op || block->getNumArguments() == 0) -      continue; - -    // If the region of the block has a type converter, try to convert the block -    // directly. -    if (auto *converter = impl.regionToConverter.lookup(block->getParent())) { -      std::optional<TypeConverter::SignatureConversion> conversion = -          converter->convertBlockSignature(block); -      if (!conversion) { -        LLVM_DEBUG(logFailure(impl.logger, "failed to convert types of moved " -                                           "block")); -        return failure(); -      } -      impl.applySignatureConversion(block, converter, *conversion); -      continue; -    } - -    // Otherwise, try to legalize the parent operation if it was not generated -    // by this pattern. This is because we will attempt to legalize the parent -    // operation, and blocks in regions created by this pattern will already be -    // legalized later on. -    if (!newOps.count(parentOp) && alreadyLegalized.insert(parentOp).second) { -      if (failed(legalize(parentOp))) { -        LLVM_DEBUG(logFailure( -            impl.logger, "operation '{0}'({1}) became illegal after rewrite", -            parentOp->getName(), parentOp)); -        return failure(); -      } -    } -  } -  return success(); -} -  LogicalResult OperationLegalizer::legalizePatternCreatedOperations(      const SetVector<Operation *> &newOps) {    for (Operation *op : newOps) { @@ -3800,10 +3727,11 @@ static LogicalResult convertFuncOpTypes(FunctionOpInterface funcOp,    TypeConverter::SignatureConversion result(type.getNumInputs());    SmallVector<Type, 1> newResults;    if (failed(typeConverter.convertSignatureArgs(type.getInputs(), result)) || -      failed(typeConverter.convertTypes(type.getResults(), newResults)) || -      failed(rewriter.convertRegionTypes(&funcOp.getFunctionBody(), -                                         typeConverter, &result))) +      failed(typeConverter.convertTypes(type.getResults(), newResults)))      return failure(); +  if (!funcOp.getFunctionBody().empty()) +    rewriter.applySignatureConversion(&funcOp.getFunctionBody().front(), result, +                                      &typeConverter);    // Update the function signature in-place.    
auto newType = FunctionType::get(rewriter.getContext(), diff --git a/mlir/test/Conversion/ConvertToSPIRV/vector.mlir b/mlir/test/Conversion/ConvertToSPIRV/vector.mlir index a75f30d..cd8cfc8 100644 --- a/mlir/test/Conversion/ConvertToSPIRV/vector.mlir +++ b/mlir/test/Conversion/ConvertToSPIRV/vector.mlir @@ -275,6 +275,42 @@ func.func @reduction_minimumf(%v : vector<3xf32>, %s: f32) -> f32 {  // ----- +// CHECK-LABEL:   spirv.func @reduction_minnumf( +// CHECK-SAME:      %[[V:.*]]: vector<3xf32>, +// CHECK-SAME:      %[[S:.*]]: f32) -> f32 "None" { +// CHECK:           %[[S0:.*]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<3xf32> +// CHECK:           %[[S1:.*]] = spirv.CompositeExtract %[[V]][1 : i32] : vector<3xf32> +// CHECK:           %[[S2:.*]] = spirv.CompositeExtract %[[V]][2 : i32] : vector<3xf32> +// CHECK:           %[[MIN0:.*]] = spirv.GL.FMin %[[S0]], %[[S1]] : f32 +// CHECK:           %[[MIN1:.*]] = spirv.GL.FMin %[[MIN0]], %[[S2]] : f32 +// CHECK:           %[[MIN2:.*]] = spirv.GL.FMin %[[MIN1]], %[[S]] : f32 +// CHECK:           spirv.ReturnValue %[[MIN2]] : f32 +// CHECK:         } +func.func @reduction_minnumf(%v : vector<3xf32>, %s: f32) -> f32 { +  %reduce = vector.reduction <minnumf>, %v, %s : vector<3xf32> into f32 +  return %reduce : f32 +} + +// ----- + +// CHECK-LABEL:   spirv.func @reduction_maxnumf( +// CHECK-SAME:      %[[V:.*]]: vector<3xf32>, +// CHECK-SAME:      %[[S:.*]]: f32) -> f32 "None" { +// CHECK:           %[[S0:.*]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<3xf32> +// CHECK:           %[[S1:.*]] = spirv.CompositeExtract %[[V]][1 : i32] : vector<3xf32> +// CHECK:           %[[S2:.*]] = spirv.CompositeExtract %[[V]][2 : i32] : vector<3xf32> +// CHECK:           %[[MAX0:.*]] = spirv.GL.FMax %[[S0]], %[[S1]] : f32 +// CHECK:           %[[MAX1:.*]] = spirv.GL.FMax %[[MAX0]], %[[S2]] : f32 +// CHECK:           %[[MAX2:.*]] = spirv.GL.FMax %[[MAX1]], %[[S]] : f32 +// CHECK:           spirv.ReturnValue %[[MAX2]] : f32 +// CHECK:         } +func.func @reduction_maxnumf(%v : vector<3xf32>, %s: f32) -> f32 { +  %reduce = vector.reduction <maxnumf>, %v, %s : vector<3xf32> into f32 +  return %reduce : f32 +} + +// ----- +  // CHECK-LABEL: func @reduction_maxsi  //  CHECK-SAME: (%[[V:.+]]: vector<3xi32>, %[[S:.+]]: i32)  //       CHECK:   %[[S0:.+]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<3xi32> diff --git a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction.mlir b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction.mlir index b616632..b062736 100644 --- a/mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction.mlir +++ b/mlir/test/Dialect/Affine/SuperVectorize/vectorize_reduction.mlir @@ -243,6 +243,106 @@ func.func @vecdim_reduction_ori(%in: memref<256x512xi32>, %out: memref<256xi32>)  // CHECK:         affine.store %[[final_red]], %{{.*}} : memref<256xi32>  // CHECK:       } +// ----- + +func.func @vecdim_reduction_xori(%in: memref<256x512xi32>, %out: memref<256xi32>) { + %cst = arith.constant 0 : i32 + affine.for %i = 0 to 256 { +   %final_red = affine.for %j = 0 to 512 iter_args(%red_iter = %cst) -> (i32) { +     %ld = affine.load %in[%i, %j] : memref<256x512xi32> +     %xor = arith.xori %red_iter, %ld : i32 +     affine.yield %xor : i32 +   } +   affine.store %final_red, %out[%i] : memref<256xi32> + } + return +} + +// CHECK-LABEL:   func.func @vecdim_reduction_xori( +// CHECK-SAME:      %[[input:.*]]: memref<256x512xi32>, +// CHECK-SAME:      %[[output:.*]]: memref<256xi32>) { +// CHECK:           %[[cst:.*]] = 
arith.constant 0 : i32 +// CHECK:           affine.for %{{.*}} = 0 to 256 { +// CHECK:             %[[vzero:.*]] = arith.constant dense<0> : vector<128xi32> +// CHECK:             %[[vred:.*]] = affine.for %{{.*}} = 0 to 512 step 128 iter_args(%[[red_iter:.*]] = %[[vzero]]) -> (vector<128xi32>) { +// CHECK:               %[[poison:.*]] = ub.poison : i32 +// CHECK:               %[[ld:.*]] = vector.transfer_read %[[input]]{{\[}}%{{.*}}, %{{.*}}], %[[poison]] : memref<256x512xi32>, vector<128xi32> +// CHECK:               %[[xor:.*]] = arith.xori %[[red_iter]], %[[ld]] : vector<128xi32> +// CHECK:               affine.yield %[[xor]] : vector<128xi32> +// CHECK:             } +// CHECK:             %[[final_red:.*]] = vector.reduction <xor>, %[[vred]] : vector<128xi32> into i32 +// CHECK:             affine.store %[[final_red]], %[[output]]{{\[}}%{{.*}}] : memref<256xi32> +// CHECK:           } +// CHECK:           return +// CHECK:         } + +// ----- + +func.func @vecdim_reduction_minnumf(%in: memref<256x512xf32>, %out: memref<256xf32>) { + %cst = arith.constant 0xFF800000 : f32 + affine.for %i = 0 to 256 { +   %final_red = affine.for %j = 0 to 512 iter_args(%red_iter = %cst) -> (f32) { +     %ld = affine.load %in[%i, %j] : memref<256x512xf32> +     %min = arith.minnumf %red_iter, %ld : f32 +     affine.yield %min : f32 +   } +   affine.store %final_red, %out[%i] : memref<256xf32> + } + return +} + +// CHECK-LABEL:   func.func @vecdim_reduction_minnumf( +// CHECK-SAME:      %[[input:.*]]: memref<256x512xf32>, +// CHECK-SAME:      %[[output:.*]]: memref<256xf32>) { +// CHECK:           %[[cst:.*]] = arith.constant 0xFF800000 : f32 +// CHECK:           affine.for %{{.*}} = 0 to 256 { +// CHECK:             %[[vzero:.*]] = arith.constant dense<0x7FC00000> : vector<128xf32> +// CHECK:             %[[vred:.*]] = affine.for %{{.*}} = 0 to 512 step 128 iter_args(%[[red_iter:.*]] = %[[vzero]]) -> (vector<128xf32>) { +// CHECK:               %[[poison:.*]] = ub.poison : f32 +// CHECK:               %[[ld:.*]] = vector.transfer_read %[[input]]{{\[}}%{{.*}}, %{{.*}}], %[[poison]] : memref<256x512xf32>, vector<128xf32> +// CHECK:               %[[min:.*]] = arith.minnumf %[[red_iter]], %[[ld]] : vector<128xf32> +// CHECK:               affine.yield %[[min]] : vector<128xf32> +// CHECK:             } +// CHECK:             %[[red_scalar:.*]] = vector.reduction <minnumf>, %[[vred]] : vector<128xf32> into f32 +// CHECK:             %[[final_red:.*]] = arith.minnumf %[[red_scalar]], %[[cst]] : f32 +// CHECK:             affine.store %[[final_red]], %[[output]]{{\[}}%{{.*}}] : memref<256xf32> +// CHECK:           } +// CHECK:           return +// CHECK:         } + +// ----- + +func.func @vecdim_reduction_maxnumf(%in: memref<256x512xf32>, %out: memref<256xf32>) { + %cst = arith.constant 0xFF800000 : f32 + affine.for %i = 0 to 256 { +   %final_red = affine.for %j = 0 to 512 iter_args(%red_iter = %cst) -> (f32) { +     %ld = affine.load %in[%i, %j] : memref<256x512xf32> +     %max = arith.maxnumf %red_iter, %ld : f32 +     affine.yield %max : f32 +   } +   affine.store %final_red, %out[%i] : memref<256xf32> + } + return +} + +// CHECK-LABEL:   func.func @vecdim_reduction_maxnumf( +// CHECK-SAME:      %[[input:.*]]: memref<256x512xf32>, +// CHECK-SAME:      %[[output:.*]]: memref<256xf32>) { +// CHECK:           %[[cst:.*]] = arith.constant 0xFF800000 : f32 +// CHECK:           affine.for %{{.*}} = 0 to 256 { +// CHECK:             %[[vzero:.*]] = arith.constant dense<0xFFC00000> : vector<128xf32> +// CHECK: 
            %[[vred:.*]] = affine.for %{{.*}} = 0 to 512 step 128 iter_args(%[[red_iter:.*]] = %[[vzero]]) -> (vector<128xf32>) { +// CHECK:               %[[poison:.*]] = ub.poison : f32 +// CHECK:               %[[ld:.*]] = vector.transfer_read %[[input]]{{\[}}%{{.*}}, %{{.*}}], %[[poison]] : memref<256x512xf32>, vector<128xf32> +// CHECK:               %[[max:.*]] = arith.maxnumf %[[red_iter]], %[[ld]] : vector<128xf32> +// CHECK:               affine.yield %[[max]] : vector<128xf32> +// CHECK:             } +// CHECK:             %[[red_scalar:.*]] = vector.reduction <maxnumf>, %[[vred]] : vector<128xf32> into f32 +// CHECK:             %[[final_red:.*]] = arith.maxnumf %[[red_scalar]], %[[cst]] : f32 +// CHECK:             affine.store %[[final_red]], %[[output]]{{\[}}%{{.*}}] : memref<256xf32> +// CHECK:           } +// CHECK:           return +// CHECK:         }  // ----- diff --git a/mlir/test/Dialect/LLVMIR/nvvm/invalid-convert-stochastic-rounding.mlir b/mlir/test/Dialect/LLVMIR/nvvm/invalid-convert-stochastic-rounding.mlir new file mode 100644 index 0000000..35f5e1b --- /dev/null +++ b/mlir/test/Dialect/LLVMIR/nvvm/invalid-convert-stochastic-rounding.mlir @@ -0,0 +1,90 @@ +// RUN: mlir-opt %s -split-input-file -verify-diagnostics + +// Test invalid target architecture (sm_100 instead of sm_100a) +gpu.module @invalid_arch_sm_100 [#nvvm.target<chip = "sm_100">] { +  func.func @convert_rs() { +    %f1 = llvm.mlir.constant(1.0 : f32) : f32 +    %f2 = llvm.mlir.constant(2.0 : f32) : f32 +    %rbits = llvm.mlir.constant(0x12345678 : i32) : i32 +    // expected-error@+1 {{'nvvm.convert.f32x2.to.f16x2' op is not supported on sm_100}} +    %res = nvvm.convert.f32x2.to.f16x2 %f1, %f2, %rbits : vector<2xf16> +    return +  } +} + +// ----- + +// Test that operations require stochastic rounding mode +llvm.func @invalid_rnd_mode_f16x2(%srcA : f32, %srcB : f32, %rbits : i32) -> vector<2xf16> { +  // expected-error@+1 {{Only RS rounding mode is supported for conversions from f32x2 to f16x2.}} +  %res = nvvm.convert.f32x2.to.f16x2 %srcA, %srcB, %rbits {rnd = #nvvm.fp_rnd_mode<rn>} : vector<2xf16> +  llvm.return %res : vector<2xf16> +} + +// ----- + +llvm.func @invalid_rnd_mode_bf16x2(%srcA : f32, %srcB : f32, %rbits : i32) -> vector<2xbf16> { +  // expected-error@+1 {{Only RS rounding mode is supported for conversions from f32x2 to bf16x2.}} +  %res = nvvm.convert.f32x2.to.bf16x2 %srcA, %srcB, %rbits {rnd = #nvvm.fp_rnd_mode<rz>} : vector<2xbf16> +  llvm.return %res : vector<2xbf16> +} + +// ----- + +// Test invalid destination types for f8x4 (should only accept f8E4M3FN, f8E5M2) +llvm.func @invalid_dst_type_f8x4_e3m4(%src : vector<4xf32>, %rbits : i32) -> vector<4xi8> { +  // expected-error@+1 {{Only 'f8E4M3FN' and 'f8E5M2' types are supported for conversions from f32x4 to f8x4.}} +  %res = nvvm.convert.f32x4.to.f8x4 %src, %rbits : vector<4xf32> -> vector<4xi8> (f8E3M4) +  llvm.return %res : vector<4xi8> +} + +// ----- + +llvm.func @invalid_dst_type_f8x4_e8m0(%src : vector<4xf32>, %rbits : i32) -> vector<4xi8> { +  // expected-error@+1 {{Only 'f8E4M3FN' and 'f8E5M2' types are supported for conversions from f32x4 to f8x4.}} +  %res = nvvm.convert.f32x4.to.f8x4 %src, %rbits : vector<4xf32> -> vector<4xi8> (f8E8M0FNU) +  llvm.return %res : vector<4xi8> +} + +// ----- + +// Test invalid destination types for f6x4 (should only accept f6E2M3FN, f6E3M2FN) +llvm.func @invalid_dst_type_f6x4_f8(%src : vector<4xf32>, %rbits : i32) -> vector<4xi8> { +  // expected-error@+1 {{Only 'f6E2M3FN' and 'f6E3M2FN' 
types are supported for conversions from f32x4 to f6x4.}} +  %res = nvvm.convert.f32x4.to.f6x4 %src, %rbits : vector<4xf32> -> vector<4xi8> (f8E4M3FN) +  llvm.return %res : vector<4xi8> +} + +// ----- + +// Test invalid destination type for f4x4 (should only accept f4E2M1FN) +llvm.func @invalid_dst_type_f4x4_f6(%src : vector<4xf32>, %rbits : i32) -> i16 { +  // expected-error@+1 {{Only 'f4E2M1FN' type is supported for conversions from f32x4 to f4x4.}} +  %res = nvvm.convert.f32x4.to.f4x4 %src, %rbits : vector<4xf32> -> i16 (f6E2M3FN) +  llvm.return %res : i16 +} + +// ----- + +// Test invalid rounding modes for non-stochastic ops +llvm.func @convert_float_to_tf32_rs_not_supported(%src : f32) -> i32 { +  // expected-error @below {{Only {rn,rz,rna} rounding modes supported for ConvertFloatToTF32Op.}} +  %res = nvvm.convert.float.to.tf32 %src {rnd = #nvvm.fp_rnd_mode<rs>} +  llvm.return %res : i32 +} + +// ----- + +llvm.func @convert_f32x2_to_f8x2_rs_not_supported(%a : f32, %b : f32) { +  // expected-error @below {{Only RN rounding mode is supported for conversions from f32x2 to 'f8E4M3FN' and 'f8E5M2' types}} +  %res = nvvm.convert.f32x2.to.f8x2 %a, %b {rnd = #nvvm.fp_rnd_mode<rs>, sat = #nvvm.sat_mode<satfinite>} : i16 (f8E4M3FN) +  llvm.return +} + +// ----- + +llvm.func @convert_bf16x2_to_f8x2_rs_not_supported(%src : vector<2xbf16>) { +  // expected-error @below {{Only RZ and RP rounding modes are supported for conversions from bf16x2 to f8x2.}} +  %res = nvvm.convert.bf16x2.to.f8x2 %src {rnd = #nvvm.fp_rnd_mode<rs>} : vector<2xbf16> -> i16 (f8E8M0FNU) +  llvm.return +} diff --git a/mlir/test/Dialect/OpenACC/invalid.mlir b/mlir/test/Dialect/OpenACC/invalid.mlir index 26b63fb..0e75894 100644 --- a/mlir/test/Dialect/OpenACC/invalid.mlir +++ b/mlir/test/Dialect/OpenACC/invalid.mlir @@ -492,6 +492,15 @@ func.func @fct1(%0 : !llvm.ptr) -> () {  // ----- +%i1 = arith.constant 1 : i32 +%i2 = arith.constant 10 : i32 +// expected-error@+1 {{unstructured acc.loop must not have induction variables}} +acc.loop control(%iv : i32) = (%i1 : i32) to (%i2 : i32) step (%i1 : i32) { +  acc.yield +} attributes {independent = [#acc.device_type<none>], unstructured} + +// ----- +  // expected-error@+1 {{expect at least one of num, dim or static values}}  acc.loop gang({}) {    "test.openacc_dummy_op"() : () -> () diff --git a/mlir/test/Dialect/OpenACC/ops.mlir b/mlir/test/Dialect/OpenACC/ops.mlir index 042ee25..df8ab9b 100644 --- a/mlir/test/Dialect/OpenACC/ops.mlir +++ b/mlir/test/Dialect/OpenACC/ops.mlir @@ -2143,6 +2143,20 @@ func.func @acc_loop_container() {  // ----- +func.func @acc_unstructured_loop() { +  acc.loop { +    acc.yield +  } attributes {independent = [#acc.device_type<none>], unstructured} +  return +} + +// CHECK-LABEL: func.func @acc_unstructured_loop +// CHECK:       acc.loop +// CHECK:         acc.yield +// CHECK:       } attributes {independent = [#acc.device_type<none>], unstructured} + +// ----- +  // Test private recipe with data bounds for array slicing  acc.private.recipe @privatization_memref_slice : memref<10x10xf32> init {  ^bb0(%arg0: memref<10x10xf32>, %bounds0: !acc.data_bounds_ty, %bounds1: !acc.data_bounds_ty): diff --git a/mlir/test/Dialect/XeGPU/invalid.mlir b/mlir/test/Dialect/XeGPU/invalid.mlir index ebbe3ce..92f3537 100644 --- a/mlir/test/Dialect/XeGPU/invalid.mlir +++ b/mlir/test/Dialect/XeGPU/invalid.mlir @@ -451,7 +451,7 @@ func.func @store_scatter_offset_wi_1(%src: memref<?xf16>) {    %offsets = arith.constant dense<[0]> : vector<1xindex>    %mask = arith.constant 
dense<1>: vector<1xi1>    // expected-error@+1 {{Mask should match value except the chunk size dim}} -  xegpu.store %val, %src[%offsets], %mask  +  xegpu.store %val, %src[%offsets], %mask          : vector<4xf16>, memref<?xf16>, vector<1xindex>, vector<1xi1>    return  } @@ -871,14 +871,6 @@ func.func @load_mem_desc_invalid_rank(%arg0: !xegpu.mem_desc<64xf16>) {  }  // ----- -func.func @load_mem_desc_invalid_attr2(%arg0: !xegpu.mem_desc<16x64xf16>) { -  // expected-error@+1 {{subgroup_block_io are only allowed when result is a 1D VectorType.}} -  %data2 = xegpu.load_matrix %arg0[8, 8] <{subgroup_block_io}>: !xegpu.mem_desc<16x64xf16> -> vector<16x16xf16> -  return -} - - -// -----  func.func @store_mem_desc_mismatch_element_type(%arg0: !xegpu.mem_desc<16x64xf16>, %arg1: vector<16x16xf32>) {    // expected-error@+1 {{failed to verify that all of {mem_desc, data} have same element type}}    xegpu.store_matrix %arg1, %arg0[8, 8] : vector<16x16xf32>, !xegpu.mem_desc<16x64xf16> @@ -900,16 +892,25 @@ func.func @store_mem_desc_invalid_rank(%arg0: !xegpu.mem_desc<64xf16>, %arg1: ve  }  // ----- -func.func @store_mem_desc_invalid_attr2(%arg0: !xegpu.mem_desc<16x64xf16>, %data: vector<16x16xf16>) { -  // expected-error@+1 {{subgroup_block_io are only allowed when result is a 1D VectorType.}} -  xegpu.store_matrix %data,  %arg0[8, 8] <{subgroup_block_io}>: vector<16x16xf16>, !xegpu.mem_desc<16x64xf16> +func.func @simt_store_matrix_vector_nonlinear(%arg0: !xegpu.mem_desc<32x32xf32, #xegpu.mem_layout<stride = [32, 1]>>, %arg1: vector<2x16xf32>) { +  // expected-error@+1 {{With subgroup_block_io, accessed data must be contiguous and coalesced}} +  xegpu.store_matrix %arg1, %arg0[0, 0] {subgroup_block_io, layout = #xegpu.layout<lane_layout = [1, 16], lane_data = [2, 1]>} : +        vector<2x16xf32>, !xegpu.mem_desc<32x32xf32, #xegpu.mem_layout<stride = [32, 1]>>    return  }  // ----- -func.func @store_mem_desc_invalid_attr2(%arg0: !xegpu.mem_desc<16x64xf16>, %data: vector<16x16xf16>) { -  // expected-error@+1 {{subgroup_block_io are only allowed when result is a 1D VectorType.}} -  xegpu.store_matrix %data,  %arg0[8, 8] <{subgroup_block_io}>: vector<16x16xf16>, !xegpu.mem_desc<16x64xf16> +func.func @simt_store_matrix_vector_noncoalesced(%arg0: !xegpu.mem_desc<32x32xf32, #xegpu.mem_layout<stride = [1, 32], block = [1, 16]>>, %arg1: vector<16x2xf32>) { +  // expected-error@+1 {{With subgroup_block_io, the distributed dimensions must be contiguous}} +  xegpu.store_matrix %arg1, %arg0[0, 0] {subgroup_block_io, layout = #xegpu.layout<lane_layout = [1, 16], lane_data = [1, 2]>} : +        vector<16x2xf32>, !xegpu.mem_desc<32x32xf32, #xegpu.mem_layout<stride = [1, 32], block = [1, 16]>>    return  } +// ----- +func.func @simt_store_matrix_vector_noncoalesced(%arg0: !xegpu.mem_desc<32x32xf32, #xegpu.mem_layout<stride = [32, 1], block = [1, 17]>>, %arg1: vector<16x2xf32>) { +  // expected-error@+1 {{With subgroup_block_io, the block shape must match the lane layout}} +  xegpu.store_matrix %arg1, %arg0[0, 0] {subgroup_block_io, layout = #xegpu.layout<lane_layout = [1, 16], lane_data = [1, 1]>} : +        vector<16x2xf32>, !xegpu.mem_desc<32x32xf32, #xegpu.mem_layout<stride = [32, 1], block = [1, 17]>> +  return +} diff --git a/mlir/test/Dialect/XeGPU/subgroup-distribute.mlir b/mlir/test/Dialect/XeGPU/subgroup-distribute.mlir index 27a3dc3..8946d14 100644 --- a/mlir/test/Dialect/XeGPU/subgroup-distribute.mlir +++ b/mlir/test/Dialect/XeGPU/subgroup-distribute.mlir @@ -265,3 +265,66 @@ gpu.module @xevm_module{      
gpu.return    }  } + +// ----- +// CHECK-LABEL: gpu.func @load_store_matrix_1({{.*}}) { +// CHECK: %[[LAYOUT_X:.*]] = arith.constant 8 : index +// CHECK: %[[LAYOUT_Y:.*]] = arith.constant 2 : index +// CHECK: %[[LANE_ID:.*]] = gpu.lane_id +// CHECK: %[[DELINEARIZED_LANE_Y:.*]] = affine.apply #{{.*}}()[%[[LANE_ID]]] +// CHECK: %[[DELINEARIZED_LANE_X:.*]] = affine.apply #{{.*}}()[%[[LANE_ID]]] +// CHECK: %[[LANE_Y_OFFSET:.*]] = index.remu %[[DELINEARIZED_LANE_Y]], %[[LAYOUT_Y]] +// CHECK: %[[LANE_X_OFFSET:.*]] = index.remu %[[DELINEARIZED_LANE_X]], %[[LAYOUT_X]] +// CHECK: %[[MAT:.*]] = xegpu.load_matrix %arg0[%[[LANE_Y_OFFSET]], %[[LANE_X_OFFSET]]] : !xegpu.mem_desc<32x32xf32>, index, index -> vector<1x1xf32> +// CHECK: xegpu.store_matrix %[[MAT]], %arg0[%[[LANE_Y_OFFSET]], %[[LANE_X_OFFSET]]] : vector<1x1xf32>, !xegpu.mem_desc<32x32xf32>, index, index +gpu.module @xevm_module{ +  gpu.func @load_store_matrix_1(%arg0: !xegpu.mem_desc<32x32xf32>) { +    %c0 = arith.constant 0 : index +    %1 = xegpu.load_matrix %arg0[%c0, %c0] <{layout = #xegpu.layout<lane_layout = [2, 8], lane_data = [1, 1]>}> : !xegpu.mem_desc<32x32xf32>, index, index -> vector<2x8xf32> +    xegpu.store_matrix %1, %arg0[%c0, %c0] <{layout = #xegpu.layout<lane_layout = [2, 8], lane_data = [1, 1]>}> : vector<2x8xf32>, !xegpu.mem_desc<32x32xf32>, index, index +    gpu.return +  } +} + +// ----- +// CHECK-LABEL: gpu.func @load_store_matrix_2({{.*}}) { +// CHECK: %[[DIST_UNIT_HEIGHT_X:.*]] = arith.constant 4 : index +// CHECK: %[[DIST_UNIT_HEIGHT_Y:.*]] = arith.constant 8 : index +// CHECK: %[[LANE_DATA_Y:.*]] = arith.constant 2 : index +// CHECK: %[[USER_OFFSET_X:.*]] = arith.constant 1 : index +// CHECK: %[[LANE_ID:.*]] = gpu.lane_id +// CHECK: %[[DELINEARIZED_LANE_Y:.*]] = affine.apply #{{.*}}()[%[[LANE_ID]]] +// CHECK: %[[DELINEARIZED_LANE_X:.*]] = affine.apply #{{.*}}()[%[[LANE_ID]]] +// CHECK: %[[LANE_Y_OFFSET_1:.*]] = index.mul %[[DELINEARIZED_LANE_Y]], %[[LANE_DATA_Y]] +// CHECK: %[[LANE_Y_OFFSET:.*]] = index.remu %[[LANE_Y_OFFSET_1]], %[[DIST_UNIT_HEIGHT_Y]] +// CHECK: %[[LANE_X_OFFSET_1:.*]] = index.remu %[[DELINEARIZED_LANE_X]], %[[DIST_UNIT_HEIGHT_X]] +// CHECK: %[[LANE_X_OFFSET:.*]] = index.add %[[LANE_X_OFFSET_1]], %[[USER_OFFSET_X]] +// CHECK: %[[MAT:.*]] = xegpu.load_matrix %arg0[%[[LANE_Y_OFFSET]], %[[LANE_X_OFFSET]]] : !xegpu.mem_desc<32x32xf32>, index, index -> vector<2x1xf32> +// CHECK: xegpu.store_matrix %[[MAT]], %arg0[%[[LANE_Y_OFFSET]], %[[LANE_X_OFFSET]]] : vector<2x1xf32>, !xegpu.mem_desc<32x32xf32>, index, index +gpu.module @xevm_module{ +  gpu.func @load_store_matrix_2(%arg0: !xegpu.mem_desc<32x32xf32>) { +    %c0 = arith.constant 0 : index +    %c1 = arith.constant 1 : index +    %1 = xegpu.load_matrix %arg0[%c0, %c1] <{layout = #xegpu.layout<lane_layout = [4, 4], lane_data = [2, 1]>}> : !xegpu.mem_desc<32x32xf32>, index, index -> vector<8x4xf32> +    xegpu.store_matrix %1, %arg0[%c0, %c1] <{layout = #xegpu.layout<lane_layout = [4, 4], lane_data = [2, 1]>}> : vector<8x4xf32>, !xegpu.mem_desc<32x32xf32>, index, index +    gpu.return +  } +} + +// ----- +// CHECK-LABEL: gpu.func @load_store_matrix_3({{.*}}) { +// CHECK: %[[MAT:.*]] = xegpu.load_matrix %arg0[%{{.*}}, %{{.*}}] <{subgroup_block_io}>: +// CHECK-SAME: !xegpu.mem_desc<32x32xf32, #xegpu.mem_layout<block = [16, 1], stride = [1, 32]>>, index, index -> vector<1x2xf32> +// CHECK: xegpu.store_matrix %[[MAT]], %arg0[%{{.*}}, %{{.*}}] <{subgroup_block_io}>: +// CHECK-SAME: vector<1x2xf32>, !xegpu.mem_desc<32x32xf32, #xegpu.mem_layout<block = [16, 
1], stride = [1, 32]>>, index, index +gpu.module @xevm_module{ +  gpu.func @load_store_matrix_3(%arg0: !xegpu.mem_desc<32x32xf32, #xegpu.mem_layout<stride = [1, 32], block = [16, 1]>>) { +    %c0 = arith.constant 0 : index +    %c1 = arith.constant 1 : index +    %1 = xegpu.load_matrix %arg0[%c0, %c1] {subgroup_block_io, layout = #xegpu.layout<lane_layout = [16, 1], lane_data = [1, 1]>} : +      !xegpu.mem_desc<32x32xf32, #xegpu.mem_layout<stride = [1, 32], block = [16, 1]>>, index, index -> vector<16x2xf32> +    xegpu.store_matrix %1, %arg0[%c0, %c1] {subgroup_block_io, layout = #xegpu.layout<lane_layout = [16, 1], lane_data = [1, 1]>} : +      vector<16x2xf32>, !xegpu.mem_desc<32x32xf32, #xegpu.mem_layout<stride = [1, 32], block = [16, 1]>>, index, index +    gpu.return +  } +} diff --git a/mlir/test/Target/LLVMIR/nvvm/convert_stochastic_rounding.mlir b/mlir/test/Target/LLVMIR/nvvm/convert_stochastic_rounding.mlir new file mode 100644 index 0000000..b5bb223 --- /dev/null +++ b/mlir/test/Target/LLVMIR/nvvm/convert_stochastic_rounding.mlir @@ -0,0 +1,182 @@ +// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s + +// ----- + +// Test valid architectures work + +// Valid case on sm_100a +gpu.module @valid_f16x2_rs_sm_100a [#nvvm.target<chip = "sm_100a">] { +  func.func @convert_rs() { +    %f1 = llvm.mlir.constant(1.0 : f32) : f32 +    %f2 = llvm.mlir.constant(2.0 : f32) : f32 +    %rbits = llvm.mlir.constant(0x12345678 : i32) : i32 +    %res = nvvm.convert.f32x2.to.f16x2 %f1, %f2, %rbits : vector<2xf16> +    return +  } +} + +// Valid case on sm_103a +gpu.module @valid_bf16x2_rs_sm_103a [#nvvm.target<chip = "sm_103a">] { +  func.func @convert_rs() { +    %f1 = llvm.mlir.constant(1.0 : f32) : f32 +    %f2 = llvm.mlir.constant(2.0 : f32) : f32 +    %rbits = llvm.mlir.constant(0 : i32) : i32 +    %res = nvvm.convert.f32x2.to.bf16x2 %f1, %f2, %rbits : vector<2xbf16> +    return +  } +} + +// ----- + +// Test F32x2 -> F16x2 with stochastic rounding (.rs) + +// CHECK-LABEL: @convert_f32x2_to_f16x2_rs +llvm.func @convert_f32x2_to_f16x2_rs(%srcA : f32, %srcB : f32, %rbits : i32) -> vector<2xf16> { +  // CHECK: %{{.*}} = call <2 x half> @llvm.nvvm.ff2f16x2.rs(float %{{.*}}, float %{{.*}}, i32 %{{.*}}) +  %res = nvvm.convert.f32x2.to.f16x2 %srcA, %srcB,  %rbits : vector<2xf16> +  llvm.return %res : vector<2xf16> +} + +// CHECK-LABEL: @convert_f32x2_to_f16x2_rs_satfinite +llvm.func @convert_f32x2_to_f16x2_rs_satfinite(%srcA : f32, %srcB : f32, %rbits : i32) -> vector<2xf16> { +  // CHECK: %{{.*}} = call <2 x half> @llvm.nvvm.ff2f16x2.rs.satfinite(float %{{.*}}, float %{{.*}}, i32 %{{.*}}) +  %res = nvvm.convert.f32x2.to.f16x2 %srcA, %srcB, %rbits {sat = #nvvm.sat_mode<satfinite>} : vector<2xf16> +  llvm.return %res : vector<2xf16> +} + +// CHECK-LABEL: @convert_f32x2_to_f16x2_rs_relu +llvm.func @convert_f32x2_to_f16x2_rs_relu(%srcA : f32, %srcB : f32, %rbits : i32) -> vector<2xf16> { +  // CHECK: %{{.*}} = call <2 x half> @llvm.nvvm.ff2f16x2.rs.relu(float %{{.*}}, float %{{.*}}, i32 %{{.*}}) +  %res = nvvm.convert.f32x2.to.f16x2 %srcA, %srcB, %rbits {relu = true} : vector<2xf16> +  llvm.return %res : vector<2xf16> +} + +// CHECK-LABEL: @convert_f32x2_to_f16x2_rs_relu_satfinite +llvm.func @convert_f32x2_to_f16x2_rs_relu_satfinite(%srcA : f32, %srcB : f32, %rbits : i32) -> vector<2xf16> { +  // CHECK: %{{.*}} = call <2 x half> @llvm.nvvm.ff2f16x2.rs.relu.satfinite(float %{{.*}}, float %{{.*}}, i32 %{{.*}}) +  %res = nvvm.convert.f32x2.to.f16x2 %srcA, %srcB, %rbits {relu = true, sat = 
#nvvm.sat_mode<satfinite>} : vector<2xf16> +  llvm.return %res : vector<2xf16> +} + +// ----- + +// Test F32x2 -> BF16x2 with stochastic rounding (.rs) + +// CHECK-LABEL: @convert_f32x2_to_bf16x2_rs +llvm.func @convert_f32x2_to_bf16x2_rs(%srcA : f32, %srcB : f32, %rbits : i32) -> vector<2xbf16> { +  // CHECK: %{{.*}} = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs(float %{{.*}}, float %{{.*}}, i32 %{{.*}}) +  %res = nvvm.convert.f32x2.to.bf16x2 %srcA, %srcB,  %rbits : vector<2xbf16> +  llvm.return %res : vector<2xbf16> +} + +// CHECK-LABEL: @convert_f32x2_to_bf16x2_rs_satfinite +llvm.func @convert_f32x2_to_bf16x2_rs_satfinite(%srcA : f32, %srcB : f32, %rbits : i32) -> vector<2xbf16> { +  // CHECK: %{{.*}} = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.satfinite(float %{{.*}}, float %{{.*}}, i32 %{{.*}}) +  %res = nvvm.convert.f32x2.to.bf16x2 %srcA, %srcB, %rbits {sat = #nvvm.sat_mode<satfinite>} : vector<2xbf16> +  llvm.return %res : vector<2xbf16> +} + +// CHECK-LABEL: @convert_f32x2_to_bf16x2_rs_relu +llvm.func @convert_f32x2_to_bf16x2_rs_relu(%srcA : f32, %srcB : f32, %rbits : i32) -> vector<2xbf16> { +  // CHECK: %{{.*}} = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.relu(float %{{.*}}, float %{{.*}}, i32 %{{.*}}) +  %res = nvvm.convert.f32x2.to.bf16x2 %srcA, %srcB, %rbits {relu = true} : vector<2xbf16> +  llvm.return %res : vector<2xbf16> +} + +// CHECK-LABEL: @convert_f32x2_to_bf16x2_rs_relu_satfinite +llvm.func @convert_f32x2_to_bf16x2_rs_relu_satfinite(%srcA : f32, %srcB : f32, %rbits : i32) -> vector<2xbf16> { +  // CHECK: %{{.*}} = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rs.relu.satfinite(float %{{.*}}, float %{{.*}}, i32 %{{.*}}) +  %res = nvvm.convert.f32x2.to.bf16x2 %srcA, %srcB, %rbits {relu = true, sat = #nvvm.sat_mode<satfinite>} : vector<2xbf16> +  llvm.return %res : vector<2xbf16> +} + +// ----- + +// Test F32x4 -> F8x4 (E4M3) with stochastic rounding (.rs) + +// CHECK-LABEL: @convert_f32x4_to_f8x4_e4m3_rs +llvm.func @convert_f32x4_to_f8x4_e4m3_rs(%src : vector<4xf32>, %rbits : i32) -> vector<4xi8> { +  // CHECK: %{{.*}} = call <4 x i8> @llvm.nvvm.f32x4.to.e4m3x4.rs.satfinite(<4 x float> %{{.*}}, i32 %{{.*}}) +  %res = nvvm.convert.f32x4.to.f8x4 %src, %rbits : vector<4xf32> -> vector<4xi8> (f8E4M3FN) +  llvm.return %res : vector<4xi8> +} + +// CHECK-LABEL: @convert_f32x4_to_f8x4_e4m3_rs_relu +llvm.func @convert_f32x4_to_f8x4_e4m3_rs_relu(%src : vector<4xf32>, %rbits : i32) -> vector<4xi8> { +  // CHECK: %{{.*}} = call <4 x i8> @llvm.nvvm.f32x4.to.e4m3x4.rs.relu.satfinite(<4 x float> %{{.*}}, i32 %{{.*}}) +  %res = nvvm.convert.f32x4.to.f8x4 %src, %rbits {relu = true} : vector<4xf32> -> vector<4xi8> (f8E4M3FN) +  llvm.return %res : vector<4xi8> +} + +// ----- + +// Test F32x4 -> F8x4 (E5M2) with stochastic rounding (.rs) + +// CHECK-LABEL: @convert_f32x4_to_f8x4_e5m2_rs +llvm.func @convert_f32x4_to_f8x4_e5m2_rs(%src : vector<4xf32>, %rbits : i32) -> vector<4xi8> { +  // CHECK: %{{.*}} = call <4 x i8> @llvm.nvvm.f32x4.to.e5m2x4.rs.satfinite(<4 x float> %{{.*}}, i32 %{{.*}}) +  %res = nvvm.convert.f32x4.to.f8x4 %src, %rbits : vector<4xf32> -> vector<4xi8> (f8E5M2) +  llvm.return %res : vector<4xi8> +} + +// CHECK-LABEL: @convert_f32x4_to_f8x4_e5m2_rs_relu +llvm.func @convert_f32x4_to_f8x4_e5m2_rs_relu(%src : vector<4xf32>, %rbits : i32) -> vector<4xi8> { +  // CHECK: %{{.*}} = call <4 x i8> @llvm.nvvm.f32x4.to.e5m2x4.rs.relu.satfinite(<4 x float> %{{.*}}, i32 %{{.*}}) +  %res = nvvm.convert.f32x4.to.f8x4 %src, %rbits {relu = true} : vector<4xf32> -> vector<4xi8> (f8E5M2) +  llvm.return 
%res : vector<4xi8> +} + +// ----- + +// Test F32x4 -> F6x4 (E2M3) with stochastic rounding (.rs) + +// CHECK-LABEL: @convert_f32x4_to_f6x4_e2m3_rs +llvm.func @convert_f32x4_to_f6x4_e2m3_rs(%src : vector<4xf32>, %rbits : i32) -> vector<4xi8> { +  // CHECK: %{{.*}} = call <4 x i8> @llvm.nvvm.f32x4.to.e2m3x4.rs.satfinite(<4 x float> %{{.*}}, i32 %{{.*}}) +  %res = nvvm.convert.f32x4.to.f6x4 %src, %rbits : vector<4xf32> -> vector<4xi8> (f6E2M3FN) +  llvm.return %res : vector<4xi8> +} + +// CHECK-LABEL: @convert_f32x4_to_f6x4_e2m3_rs_relu +llvm.func @convert_f32x4_to_f6x4_e2m3_rs_relu(%src : vector<4xf32>, %rbits : i32) -> vector<4xi8> { +  // CHECK: %{{.*}} = call <4 x i8> @llvm.nvvm.f32x4.to.e2m3x4.rs.relu.satfinite(<4 x float> %{{.*}}, i32 %{{.*}}) +  %res = nvvm.convert.f32x4.to.f6x4 %src, %rbits {relu = true} : vector<4xf32> -> vector<4xi8> (f6E2M3FN) +  llvm.return %res : vector<4xi8> +} + +// ----- + +// Test F32x4 -> F6x4 (E3M2) with stochastic rounding (.rs) + +// CHECK-LABEL: @convert_f32x4_to_f6x4_e3m2_rs +llvm.func @convert_f32x4_to_f6x4_e3m2_rs(%src : vector<4xf32>, %rbits : i32) -> vector<4xi8> { +  // CHECK: %{{.*}} = call <4 x i8> @llvm.nvvm.f32x4.to.e3m2x4.rs.satfinite(<4 x float> %{{.*}}, i32 %{{.*}}) +  %res = nvvm.convert.f32x4.to.f6x4 %src, %rbits : vector<4xf32> -> vector<4xi8> (f6E3M2FN) +  llvm.return %res : vector<4xi8> +} + +// CHECK-LABEL: @convert_f32x4_to_f6x4_e3m2_rs_relu +llvm.func @convert_f32x4_to_f6x4_e3m2_rs_relu(%src : vector<4xf32>, %rbits : i32) -> vector<4xi8> { +  // CHECK: %{{.*}} = call <4 x i8> @llvm.nvvm.f32x4.to.e3m2x4.rs.relu.satfinite(<4 x float> %{{.*}}, i32 %{{.*}}) +  %res = nvvm.convert.f32x4.to.f6x4 %src, %rbits {relu = true} : vector<4xf32> -> vector<4xi8> (f6E3M2FN) +  llvm.return %res : vector<4xi8> +} + +// ----- + +// Test F32x4 -> F4x4 (E2M1) with stochastic rounding (.rs) + +// CHECK-LABEL: @convert_f32x4_to_f4x4_e2m1_rs +llvm.func @convert_f32x4_to_f4x4_e2m1_rs(%src : vector<4xf32>, %rbits : i32) -> i16 { +  // CHECK: %{{.*}} = call i16 @llvm.nvvm.f32x4.to.e2m1x4.rs.satfinite(<4 x float> %{{.*}}, i32 %{{.*}}) +  %res = nvvm.convert.f32x4.to.f4x4 %src, %rbits : vector<4xf32> -> i16 (f4E2M1FN) +  llvm.return %res : i16 +} + +// CHECK-LABEL: @convert_f32x4_to_f4x4_e2m1_rs_relu +llvm.func @convert_f32x4_to_f4x4_e2m1_rs_relu(%src : vector<4xf32>, %rbits : i32) -> i16 { +  // CHECK: %{{.*}} = call i16 @llvm.nvvm.f32x4.to.e2m1x4.rs.relu.satfinite(<4 x float> %{{.*}}, i32 %{{.*}}) +  %res = nvvm.convert.f32x4.to.f4x4 %src, %rbits {relu = true} : vector<4xf32> -> i16 (f4E2M1FN) +  llvm.return %res : i16 +} + diff --git a/mlir/test/Target/SPIRV/group-ops.mlir b/mlir/test/Target/SPIRV/group-ops.mlir index cf519cb..6f19b35 100644 --- a/mlir/test/Target/SPIRV/group-ops.mlir +++ b/mlir/test/Target/SPIRV/group-ops.mlir @@ -1,11 +1,13 @@ -// RUN: mlir-translate -no-implicit-module -test-spirv-roundtrip -split-input-file %s | FileCheck %s +// RUN: mlir-translate --no-implicit-module --test-spirv-roundtrip --split-input-file %s | FileCheck %s  // RUN: %if spirv-tools %{ rm -rf %t %}  // RUN: %if spirv-tools %{ mkdir %t %}  // RUN: %if spirv-tools %{ mlir-translate --no-implicit-module --serialize-spirv --split-input-file --spirv-save-validation-files-with-prefix=%t/module %s %}  // RUN: %if spirv-tools %{ spirv-val %t %} -spirv.module Logical GLSL450 requires #spirv.vce<v1.3, [Shader, Linkage, SubgroupBallotKHR, Groups, SubgroupBufferBlockIOINTEL, GroupNonUniformArithmetic, GroupUniformArithmeticKHR], [SPV_KHR_storage_buffer_storage_class, 
SPV_KHR_shader_ballot, SPV_INTEL_subgroups, SPV_KHR_uniform_group_instructions]> { +spirv.module Logical GLSL450 requires #spirv.vce<v1.3, +  [Shader, Linkage, SubgroupBallotKHR, Groups, GroupNonUniformArithmetic, GroupUniformArithmeticKHR], +  [SPV_KHR_storage_buffer_storage_class, SPV_KHR_shader_ballot, SPV_KHR_uniform_group_instructions]> {    // CHECK-LABEL: @subgroup_ballot    spirv.func @subgroup_ballot(%predicate: i1) -> vector<4xi32> "None" {      // CHECK: %{{.*}} = spirv.KHR.SubgroupBallot %{{.*}}: vector<4xi32> @@ -24,30 +26,6 @@ spirv.module Logical GLSL450 requires #spirv.vce<v1.3, [Shader, Linkage, Subgrou      %0 = spirv.GroupBroadcast <Workgroup> %value, %localid : f32, vector<3xi32>      spirv.ReturnValue %0: f32    } -  // CHECK-LABEL: @subgroup_block_read_intel -  spirv.func @subgroup_block_read_intel(%ptr : !spirv.ptr<i32, StorageBuffer>) -> i32 "None" { -    // CHECK: spirv.INTEL.SubgroupBlockRead %{{.*}} : !spirv.ptr<i32, StorageBuffer> -> i32 -    %0 = spirv.INTEL.SubgroupBlockRead %ptr : !spirv.ptr<i32, StorageBuffer> -> i32 -    spirv.ReturnValue %0: i32 -  } -  // CHECK-LABEL: @subgroup_block_read_intel_vector -  spirv.func @subgroup_block_read_intel_vector(%ptr : !spirv.ptr<i32, StorageBuffer>) -> vector<3xi32> "None" { -    // CHECK: spirv.INTEL.SubgroupBlockRead %{{.*}} : !spirv.ptr<i32, StorageBuffer> -> vector<3xi32> -    %0 = spirv.INTEL.SubgroupBlockRead %ptr : !spirv.ptr<i32, StorageBuffer> -> vector<3xi32> -    spirv.ReturnValue %0: vector<3xi32> -  } -  // CHECK-LABEL: @subgroup_block_write_intel -  spirv.func @subgroup_block_write_intel(%ptr : !spirv.ptr<i32, StorageBuffer>, %value: i32) -> () "None" { -    // CHECK: spirv.INTEL.SubgroupBlockWrite %{{.*}}, %{{.*}} : i32 -    spirv.INTEL.SubgroupBlockWrite "StorageBuffer" %ptr, %value : i32 -    spirv.Return -  } -  // CHECK-LABEL: @subgroup_block_write_intel_vector -  spirv.func @subgroup_block_write_intel_vector(%ptr : !spirv.ptr<i32, StorageBuffer>, %value: vector<3xi32>) -> () "None" { -    // CHECK: spirv.INTEL.SubgroupBlockWrite %{{.*}}, %{{.*}} : vector<3xi32> -    spirv.INTEL.SubgroupBlockWrite "StorageBuffer" %ptr, %value : vector<3xi32> -    spirv.Return -  }    // CHECK-LABEL: @group_iadd    spirv.func @group_iadd(%value: i32) -> i32 "None" {      // CHECK: spirv.GroupIAdd <Workgroup> <Reduce> %{{.*}} : i32 diff --git a/mlir/test/Target/SPIRV/subgroup-block-intel.mlir b/mlir/test/Target/SPIRV/subgroup-block-intel.mlir new file mode 100644 index 0000000..14060e6 --- /dev/null +++ b/mlir/test/Target/SPIRV/subgroup-block-intel.mlir @@ -0,0 +1,34 @@ +// RUN: mlir-translate --no-implicit-module --test-spirv-roundtrip %s | FileCheck %s + +// RUN: %if spirv-tools %{ rm -rf %t %} +// RUN: %if spirv-tools %{ mkdir %t %} +// RUN: %if spirv-tools %{ mlir-translate --no-implicit-module --serialize-spirv --spirv-save-validation-files-with-prefix=%t/module %s %} +// RUN: %if spirv-tools %{ spirv-val %t %} + +spirv.module Physical64 GLSL450 requires #spirv.vce<v1.3, [Addresses, Shader, Linkage, SubgroupBufferBlockIOINTEL], +                                                          [SPV_KHR_storage_buffer_storage_class, SPV_INTEL_subgroups]> { +  // CHECK-LABEL: @subgroup_block_read_intel +  spirv.func @subgroup_block_read_intel(%ptr : !spirv.ptr<i32, StorageBuffer>) -> i32 "None" { +    // CHECK: spirv.INTEL.SubgroupBlockRead %{{.*}} : !spirv.ptr<i32, StorageBuffer> -> i32 +    %0 = spirv.INTEL.SubgroupBlockRead %ptr : !spirv.ptr<i32, StorageBuffer> -> i32 +    spirv.ReturnValue %0: i32 +  } +  // 
CHECK-LABEL: @subgroup_block_read_intel_vector +  spirv.func @subgroup_block_read_intel_vector(%ptr : !spirv.ptr<i32, StorageBuffer>) -> vector<3xi32> "None" { +    // CHECK: spirv.INTEL.SubgroupBlockRead %{{.*}} : !spirv.ptr<i32, StorageBuffer> -> vector<3xi32> +    %0 = spirv.INTEL.SubgroupBlockRead %ptr : !spirv.ptr<i32, StorageBuffer> -> vector<3xi32> +    spirv.ReturnValue %0: vector<3xi32> +  } +  // CHECK-LABEL: @subgroup_block_write_intel +  spirv.func @subgroup_block_write_intel(%ptr : !spirv.ptr<i32, StorageBuffer>, %value: i32) -> () "None" { +    // CHECK: spirv.INTEL.SubgroupBlockWrite %{{.*}}, %{{.*}} : i32 +    spirv.INTEL.SubgroupBlockWrite "StorageBuffer" %ptr, %value : i32 +    spirv.Return +  } +  // CHECK-LABEL: @subgroup_block_write_intel_vector +  spirv.func @subgroup_block_write_intel_vector(%ptr : !spirv.ptr<i32, StorageBuffer>, %value: vector<3xi32>) -> () "None" { +    // CHECK: spirv.INTEL.SubgroupBlockWrite %{{.*}}, %{{.*}} : vector<3xi32> +    spirv.INTEL.SubgroupBlockWrite "StorageBuffer" %ptr, %value : vector<3xi32> +    spirv.Return +  } +} diff --git a/mlir/test/Transforms/test-legalizer-no-materializations.mlir b/mlir/test/Transforms/test-legalizer-no-materializations.mlir new file mode 100644 index 0000000..82dd742 --- /dev/null +++ b/mlir/test/Transforms/test-legalizer-no-materializations.mlir @@ -0,0 +1,67 @@ +// RUN: mlir-opt -allow-unregistered-dialect -split-input-file -test-legalize-patterns="allow-pattern-rollback=0 build-materializations=0 attach-debug-materialization-kind=1" -verify-diagnostics %s | FileCheck %s --check-prefix=CHECK-KIND + +// CHECK-LABEL: func @dropped_input_in_use +// CHECK-KIND-LABEL: func @dropped_input_in_use +func.func @dropped_input_in_use(%arg: i16, %arg2: i64) { +  // CHECK-NEXT: %[[cast:.*]] = "test.cast"() : () -> i16 +  // CHECK-NEXT: "work"(%[[cast]]) : (i16) +  // CHECK-KIND-NEXT: %[[cast:.*]] = builtin.unrealized_conversion_cast to i16 {__kind__ = "source"} +  // CHECK-KIND-NEXT: "work"(%[[cast]]) : (i16) +  // expected-remark@+1 {{op 'work' is not legalizable}} +  "work"(%arg) : (i16) -> () +} + +// ----- + +// CHECK-KIND-LABEL: func @test_lookup_without_converter +//       CHECK-KIND:   %[[producer:.*]] = "test.valid_producer"() : () -> i16 +//       CHECK-KIND:   %[[cast:.*]] = builtin.unrealized_conversion_cast %[[producer]] : i16 to f64 {__kind__ = "target"} +//       CHECK-KIND:   "test.valid_consumer"(%[[cast]]) : (f64) -> () +//       CHECK-KIND:   "test.valid_consumer"(%[[producer]]) : (i16) -> () +func.func @test_lookup_without_converter() { +  %0 = "test.replace_with_valid_producer"() {type = i16} : () -> (i64) +  "test.replace_with_valid_consumer"(%0) {with_converter} : (i64) -> () +  // Make sure that the second "replace_with_valid_consumer" lowering does not +  // lookup the materialization that was created for the above op. 
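+  // It should instead consume the original i16 producer directly, as the final CHECK-KIND line above verifies.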
+  "test.replace_with_valid_consumer"(%0) : (i64) -> () +  // expected-remark@+1 {{op 'func.return' is not legalizable}} +  return +} + +// ----- + +// CHECK-LABEL: func @remap_moved_region_args +func.func @remap_moved_region_args() { +  // CHECK-NEXT: return +  // CHECK-NEXT: ^bb1(%[[arg0:.*]]: i64, %[[arg1:.*]]: i16, %[[arg2:.*]]: i64, %[[arg3:.*]]: f32): +  // CHECK-NEXT: %[[cast1:.*]]:2 = builtin.unrealized_conversion_cast %[[arg3]] : f32 to f16, f16 +  // CHECK-NEXT: %[[cast2:.*]] = builtin.unrealized_conversion_cast %[[arg2]] : i64 to f64 +  // CHECK-NEXT: %[[cast3:.*]] = builtin.unrealized_conversion_cast %[[arg0]] : i64 to f64 +  // CHECK-NEXT: %[[cast4:.*]] = "test.cast"(%[[cast1]]#0, %[[cast1]]#1) : (f16, f16) -> f32 +  // CHECK-NEXT: "test.valid"(%[[cast3]], %[[cast2]], %[[cast4]]) : (f64, f64, f32) +  "test.region"() ({ +    ^bb1(%i0: i64, %unused: i16, %i1: i64, %2: f32): +      "test.invalid"(%i0, %i1, %2) : (i64, i64, f32) -> () +  }) : () -> () +  // expected-remark@+1 {{op 'func.return' is not legalizable}} +  return +} + +// ----- + +// CHECK-LABEL: func @remap_cloned_region_args +func.func @remap_cloned_region_args() { +  // CHECK-NEXT: return +  // CHECK-NEXT: ^bb1(%[[arg0:.*]]: i64, %[[arg1:.*]]: i16, %[[arg2:.*]]: i64, %[[arg3:.*]]: f32): +  // CHECK-NEXT: %[[cast1:.*]]:2 = builtin.unrealized_conversion_cast %[[arg3]] : f32 to f16, f16 +  // CHECK-NEXT: %[[cast2:.*]] = builtin.unrealized_conversion_cast %[[arg2]] : i64 to f64 +  // CHECK-NEXT: %[[cast3:.*]] = builtin.unrealized_conversion_cast %[[arg0]] : i64 to f64 +  // CHECK-NEXT: %[[cast4:.*]] = "test.cast"(%[[cast1]]#0, %[[cast1]]#1) : (f16, f16) -> f32 +  // CHECK-NEXT: "test.valid"(%[[cast3]], %[[cast2]], %[[cast4]]) : (f64, f64, f32) +  "test.region"() ({ +    ^bb1(%i0: i64, %unused: i16, %i1: i64, %2: f32): +      "test.invalid"(%i0, %i1, %2) : (i64, i64, f32) -> () +  }) {legalizer.should_clone} : () -> () +  // expected-remark@+1 {{op 'func.return' is not legalizable}} +  return +} diff --git a/mlir/test/Transforms/test-legalizer.mlir b/mlir/test/Transforms/test-legalizer.mlir index 94c5bb4..7c43bb7 100644 --- a/mlir/test/Transforms/test-legalizer.mlir +++ b/mlir/test/Transforms/test-legalizer.mlir @@ -1,7 +1,6 @@  // RUN: mlir-opt -allow-unregistered-dialect -split-input-file -test-legalize-patterns="allow-pattern-rollback=1" -verify-diagnostics %s | FileCheck %s  // RUN: mlir-opt -allow-unregistered-dialect -split-input-file -test-legalize-patterns="allow-pattern-rollback=1" -verify-diagnostics -profile-actions-to=- %s | FileCheck %s --check-prefix=CHECK-PROFILER  // RUN: mlir-opt -allow-unregistered-dialect -split-input-file -test-legalize-patterns="allow-pattern-rollback=0" -verify-diagnostics %s | FileCheck %s -// RUN: mlir-opt -allow-unregistered-dialect -split-input-file -test-legalize-patterns="allow-pattern-rollback=0 build-materializations=0 attach-debug-materialization-kind=1" -verify-diagnostics %s | FileCheck %s --check-prefix=CHECK-KIND  // CHECK-PROFILER: "name": "pass-execution", "cat": "PERF", "ph": "B"  // CHECK-PROFILER: "name": "apply-conversion", "cat": "PERF", "ph": "B" @@ -146,36 +145,6 @@ func.func @no_remap_nested() {  // ----- -// CHECK-LABEL: func @remap_moved_region_args -func.func @remap_moved_region_args() { -  // CHECK-NEXT: return -  // CHECK-NEXT: ^bb1(%{{.*}}: f64, %{{.*}}: f64, %{{.*}}: f16, %{{.*}}: f16): -  // CHECK-NEXT: "test.cast"{{.*}} : (f16, f16) -> f32 -  // CHECK-NEXT: "test.valid"{{.*}} : (f64, f64, f32) -  "test.region"() ({ -    ^bb1(%i0: i64, %unused: i16, 
%i1: i64, %2: f32): -      "test.invalid"(%i0, %i1, %2) : (i64, i64, f32) -> () -  }) : () -> () -  // expected-remark@+1 {{op 'func.return' is not legalizable}} -  return -} - -// ----- - -// CHECK-LABEL: func @remap_cloned_region_args -func.func @remap_cloned_region_args() { -  // CHECK-NEXT: return -  // CHECK-NEXT: ^bb1(%{{.*}}: f64, %{{.*}}: f64, %{{.*}}: f16, %{{.*}}: f16): -  // CHECK-NEXT: "test.cast"{{.*}} : (f16, f16) -> f32 -  // CHECK-NEXT: "test.valid"{{.*}} : (f64, f64, f32) -  "test.region"() ({ -    ^bb1(%i0: i64, %unused: i16, %i1: i64, %2: f32): -      "test.invalid"(%i0, %i1, %2) : (i64, i64, f32) -> () -  }) {legalizer.should_clone} : () -> () -  // expected-remark@+1 {{op 'func.return' is not legalizable}} -  return -} -  // CHECK-LABEL: func @remap_drop_region  func.func @remap_drop_region() {    // CHECK-NEXT: return @@ -191,12 +160,9 @@ func.func @remap_drop_region() {  // -----  // CHECK-LABEL: func @dropped_input_in_use -// CHECK-KIND-LABEL: func @dropped_input_in_use  func.func @dropped_input_in_use(%arg: i16, %arg2: i64) {    // CHECK-NEXT: %[[cast:.*]] = "test.cast"() : () -> i16    // CHECK-NEXT: "work"(%[[cast]]) : (i16) -  // CHECK-KIND-NEXT: %[[cast:.*]] = builtin.unrealized_conversion_cast to i16 {__kind__ = "source"} -  // CHECK-KIND-NEXT: "work"(%[[cast]]) : (i16)    // expected-remark@+1 {{op 'work' is not legalizable}}    "work"(%arg) : (i16) -> ()  } @@ -452,11 +418,6 @@ func.func @test_multiple_1_to_n_replacement() {  //       CHECK:   %[[cast:.*]] = "test.cast"(%[[producer]]) : (i16) -> f64  //       CHECK:   "test.valid_consumer"(%[[cast]]) : (f64) -> ()  //       CHECK:   "test.valid_consumer"(%[[producer]]) : (i16) -> () -// CHECK-KIND-LABEL: func @test_lookup_without_converter -//       CHECK-KIND:   %[[producer:.*]] = "test.valid_producer"() : () -> i16 -//       CHECK-KIND:   %[[cast:.*]] = builtin.unrealized_conversion_cast %[[producer]] : i16 to f64 {__kind__ = "target"} -//       CHECK-KIND:   "test.valid_consumer"(%[[cast]]) : (f64) -> () -//       CHECK-KIND:   "test.valid_consumer"(%[[producer]]) : (i16) -> ()  func.func @test_lookup_without_converter() {    %0 = "test.replace_with_valid_producer"() {type = i16} : () -> (i64)    "test.replace_with_valid_consumer"(%0) {with_converter} : (i64) -> () diff --git a/mlir/test/lib/Dialect/Test/TestPatterns.cpp b/mlir/test/lib/Dialect/Test/TestPatterns.cpp index fd2b943..12edecc 100644 --- a/mlir/test/lib/Dialect/Test/TestPatterns.cpp +++ b/mlir/test/lib/Dialect/Test/TestPatterns.cpp @@ -1553,8 +1553,7 @@ struct TestLegalizePatternDriver                             [](Type type) { return type.isF32(); });      });      target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) { -      return converter.isSignatureLegal(op.getFunctionType()) && -             converter.isLegal(&op.getBody()); +      return converter.isSignatureLegal(op.getFunctionType());      });      target.addDynamicallyLegalOp<func::CallOp>(          [&](func::CallOp op) { return converter.isLegal(op); }); @@ -2156,8 +2155,7 @@ struct TestTypeConversionDriver                recursiveType.getName() == "outer_converted_type");      });      target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) { -      return converter.isSignatureLegal(op.getFunctionType()) && -             converter.isLegal(&op.getBody()); +      return converter.isSignatureLegal(op.getFunctionType());      });      target.addDynamicallyLegalOp<TestCastOp>([&](TestCastOp op) {        // Allow casts from F64 to F32. 
diff --git a/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp b/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp index 76d4611..93d5144 100644 --- a/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp +++ b/mlir/test/lib/Dialect/XeGPU/TestXeGPUTransforms.cpp @@ -200,7 +200,8 @@ class TestStepOpPattern : public OpConversionPattern<vector::StepOp> {      Value sgId =          gpu::SubgroupIdOp::create(rewriter, loc, /*upper_bound=*/nullptr); -    auto maybeOffsets = sliceAttr.getOffsets(rewriter, loc, sgId, wgShape); +    auto maybeOffsets = +        sliceAttr.computeDistributedCoords(rewriter, loc, sgId, wgShape);      if (failed(maybeOffsets))        return failure(); diff --git a/openmp/runtime/src/CMakeLists.txt b/openmp/runtime/src/CMakeLists.txt index 6ac047a..5dd7f4b 100644 --- a/openmp/runtime/src/CMakeLists.txt +++ b/openmp/runtime/src/CMakeLists.txt @@ -254,23 +254,35 @@ set(LIBOMP_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR} PARENT_SCOPE)  # Add symbolic links to libomp  if(NOT WIN32) -  add_custom_command(TARGET omp POST_BUILD -    COMMAND ${CMAKE_COMMAND} -E create_symlink ${LIBOMP_LIB_FILE} -      libgomp${LIBOMP_LIBRARY_SUFFIX} -    COMMAND ${CMAKE_COMMAND} -E create_symlink ${LIBOMP_LIB_FILE} -      libiomp5${LIBOMP_LIBRARY_SUFFIX} -    WORKING_DIRECTORY ${LIBOMP_LIBRARY_DIR} -  ) -  if(LIBOMP_ENABLE_SHARED) -    if(APPLE) -      set(VERSIONED_LIBGOMP_NAME libgomp.1${LIBOMP_LIBRARY_SUFFIX}) -    else() -      set(VERSIONED_LIBGOMP_NAME libgomp${LIBOMP_LIBRARY_SUFFIX}.1) -    endif() +  if(AIX) +    # On AIX, libomp.a is the name for both static and shared objects. +    set(LIBOMP_AIX_SUFFIX ${CMAKE_STATIC_LIBRARY_SUFFIX})      add_custom_command(TARGET omp POST_BUILD -      COMMAND ${CMAKE_COMMAND} -E create_symlink ${LIBOMP_LIB_FILE} ${VERSIONED_LIBGOMP_NAME} +      COMMAND ${CMAKE_COMMAND} -E create_symlink +        ${LIBOMP_LIB_NAME}${LIBOMP_AIX_SUFFIX} libgomp${LIBOMP_AIX_SUFFIX} +      COMMAND ${CMAKE_COMMAND} -E create_symlink +        ${LIBOMP_LIB_NAME}${LIBOMP_AIX_SUFFIX} libiomp5${LIBOMP_AIX_SUFFIX}        WORKING_DIRECTORY ${LIBOMP_LIBRARY_DIR}      ) +  else() +    add_custom_command(TARGET omp POST_BUILD +      COMMAND ${CMAKE_COMMAND} -E create_symlink ${LIBOMP_LIB_FILE} +        libiomp5${LIBOMP_LIBRARY_SUFFIX} +      COMMAND ${CMAKE_COMMAND} -E create_symlink ${LIBOMP_LIB_FILE} +        libgomp${LIBOMP_LIBRARY_SUFFIX} +      WORKING_DIRECTORY ${LIBOMP_LIBRARY_DIR} +    ) +    if(LIBOMP_ENABLE_SHARED) +      if(APPLE) +        set(VERSIONED_LIBGOMP_NAME libgomp.1${LIBOMP_LIBRARY_SUFFIX}) +      else() +        set(VERSIONED_LIBGOMP_NAME libgomp${LIBOMP_LIBRARY_SUFFIX}.1) +      endif() +      add_custom_command(TARGET omp POST_BUILD +        COMMAND ${CMAKE_COMMAND} -E create_symlink ${LIBOMP_LIB_FILE} ${VERSIONED_LIBGOMP_NAME} +        WORKING_DIRECTORY ${LIBOMP_LIBRARY_DIR} +      ) +    endif()    endif()  endif()  | 
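For readers skimming the libomp change above: a standalone CMake sketch (not part of the patch) of the symlink layout the new AIX branch produces. It assumes LIBOMP_LIB_NAME is libomp and that CMAKE_STATIC_LIBRARY_SUFFIX is ".a", and it uses file(CREATE_LINK) in place of the post-build `cmake -E create_symlink` command so it can run outside the libomp build.

cmake_minimum_required(VERSION 3.20)
project(libomp_aix_symlink_sketch NONE)

# Assumed values; in the real build these come from the libomp configuration.
set(LIBOMP_LIB_NAME libomp)
set(LIBOMP_AIX_SUFFIX ".a")  # CMAKE_STATIC_LIBRARY_SUFFIX on AIX

# Stand-in for the built archive so the symlinks have an existing target.
file(TOUCH ${CMAKE_CURRENT_BINARY_DIR}/${LIBOMP_LIB_NAME}${LIBOMP_AIX_SUFFIX})

# libgomp.a and libiomp5.a become symlinks to libomp.a; on AIX the same archive
# name is used whether the library is built static or shared, so no versioned
# libgomp name is created.
foreach(compat_name libgomp libiomp5)
  file(CREATE_LINK
    ${LIBOMP_LIB_NAME}${LIBOMP_AIX_SUFFIX}
    ${CMAKE_CURRENT_BINARY_DIR}/${compat_name}${LIBOMP_AIX_SUFFIX}
    SYMBOLIC)
endforeach()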
