238 files changed, 17450 insertions, 5934 deletions
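The ByteCode changes further down (clang/lib/AST/ByteCode/Pointer.h and Pointer.cpp) add IntPointer::baseCast, which adjusts an integer-valued pointer by a base-class offset looked up in the ASTRecordLayout; the new test clang/test/AST/ByteCode/const-base-cast.cpp pins down the expected offsets. As a reading aid only (not part of the patch), here is a minimal C++ sketch of the derived-to-base adjustment being modeled, reusing the record layout from that test:

    // A derived-to-base cast adds the base subobject's offset within the
    // complete object; IntPointer::baseCast computes that same offset from
    // the ASTRecordLayout instead of dereferencing a real object.
    #include <cstdio>

    struct X { int x[12]; };        // 48 bytes of data
    struct A : X { char x, y, z; }; // data ends at offset 51
    struct B { char y; };
    struct C : A, B {};             // Itanium ABI places B at offset 51

    int main() {
      C c;
      // Mirrors ((char *)(B *)(C *)p) - (char *)p from const-base-cast.cpp,
      // which the test expects to fold to 51.
      std::printf("%td\n", reinterpret_cast<char *>(static_cast<B *>(&c)) -
                               reinterpret_cast<char *>(&c));
    }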
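Likewise, the HLSL step builtin wired up below (Builtins.td, CGBuiltin.cpp, SemaHLSL.cpp, hlsl_intrinsics.h) documents the formula (x >= y) ? 1 : 0, applied elementwise for the vector overloads; codegen lowers it to the target intrinsic obtained from CGM.getHLSLRuntime().getStepIntrinsic(). A minimal C++ model of the documented semantics (illustrative only, not the actual lowering):

    // Scalar and elementwise model of step(x, y) == (x >= y) ? 1 : 0.
    #include <array>
    #include <cstddef>
    #include <cstdio>

    static float stepScalar(float x, float y) { return x >= y ? 1.0f : 0.0f; }

    template <std::size_t N>
    static std::array<float, N> stepVec(const std::array<float, N> &x,
                                        const std::array<float, N> &y) {
      std::array<float, N> r{};
      for (std::size_t i = 0; i < N; ++i)
        r[i] = stepScalar(x[i], y[i]);
      return r;
    }

    int main() {
      auto r = stepVec<3>({{0.0f, 1.0f, 2.0f}}, {{1.0f, 1.0f, 1.0f}});
      std::printf("%g %g %g\n", r[0], r[1], r[2]); // prints: 0 1 1
    }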
diff --git a/.github/workflows/release-binaries.yml b/.github/workflows/release-binaries.yml
index 672dd75..925912d 100644
--- a/.github/workflows/release-binaries.yml
+++ b/.github/workflows/release-binaries.yml
@@ -442,11 +442,22 @@ jobs:
           name: ${{ needs.prepare.outputs.release-binary-filename }}-attestation
           path: ${{ needs.prepare.outputs.release-binary-filename }}.jsonl
 
+      - name: Checkout Release Scripts
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+        with:
+          sparse-checkout: |
+            llvm/utils/release/github-upload-release.py
+            llvm/utils/git/requirements.txt
+          sparse-checkout-cone-mode: false
+
+      - name: Install Python Requirements
+        run: |
+          pip install --require-hashes -r ./llvm/utils/git/requirements.txt
+
       - name: Upload Release
         shell: bash
         run: |
-          sudo apt install python3-github
-          ./llvm-project/llvm/utils/release/github-upload-release.py \
+          ./llvm/utils/release/github-upload-release.py \
            --token ${{ github.token }} \
            --release ${{ needs.prepare.outputs.release-version }} \
            upload \
diff --git a/clang/docs/OpenMPSupport.rst b/clang/docs/OpenMPSupport.rst
index cdbd695..72f1385 100644
--- a/clang/docs/OpenMPSupport.rst
+++ b/clang/docs/OpenMPSupport.rst
@@ -306,7 +306,7 @@ implementation.
 +------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
 | misc | OMP_NUM_TEAMS and OMP_TEAMS_THREAD_LIMIT env vars | :good:`done` | D138769 |
 +------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
-| misc | 'target_device' selector in context specifier | :none:`unclaimed` | |
+| misc | 'target_device' selector in context specifier | :none:`worked on` | |
 +------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
 | misc | begin/end declare variant | :good:`done` | D71179 |
 +------------------------------+--------------------------------------------------------------+--------------------------+-----------------------------------------------------------------------+
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index d4db877..c4fa017 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -382,6 +382,9 @@ Bug Fixes to C++ Support
 - Fix a crash when using ``source_location`` in the trailing return type of a lambda expression. (#GH67134)
 - A follow-up fix was added for (#GH61460), as the previous fix was not entirely correct. (#GH86361)
 - Fixed a crash in the typo correction of an invalid CTAD guide. (#GH107887)
+- Fixed a crash when clang tries to substitute a parameter pack while retaining the parameter
+  pack.
#GH63819, #GH107560 + Bug Fixes to AST Handling ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td index 3dc04f6..6cf03d2 100644 --- a/clang/include/clang/Basic/Builtins.td +++ b/clang/include/clang/Basic/Builtins.td @@ -4763,6 +4763,7 @@ def HLSLSaturate : LangBuiltin<"HLSL_LANG"> { let Prototype = "void(...)"; } + def HLSLSelect : LangBuiltin<"HLSL_LANG"> { let Spellings = ["__builtin_hlsl_select"]; let Attributes = [NoThrow, Const]; @@ -4775,6 +4776,12 @@ def HLSLSign : LangBuiltin<"HLSL_LANG"> { let Prototype = "void(...)"; } +def HLSLStep: LangBuiltin<"HLSL_LANG"> { + let Spellings = ["__builtin_hlsl_step"]; + let Attributes = [NoThrow, Const]; + let Prototype = "void(...)"; +} + // Builtins for XRay. def XRayCustomEvent : Builtin { let Spellings = ["__xray_customevent"]; diff --git a/clang/include/clang/Basic/BuiltinsX86.def b/clang/include/clang/Basic/BuiltinsX86.def index 48376ee..3f47e34 100644 --- a/clang/include/clang/Basic/BuiltinsX86.def +++ b/clang/include/clang/Basic/BuiltinsX86.def @@ -2122,6 +2122,36 @@ TARGET_BUILTIN(__builtin_ia32_vpdpwuud256, "V8iV8iV8iV8i", "nV:256:", "avxvnniin TARGET_BUILTIN(__builtin_ia32_vpdpwuuds128, "V4iV4iV4iV4i", "nV:128:", "avxvnniint16|avx10.2-256") TARGET_BUILTIN(__builtin_ia32_vpdpwuuds256, "V8iV8iV8iV8i", "nV:256:", "avxvnniint16|avx10.2-256") +// AVX10.2 SATCVT-DS +TARGET_BUILTIN(__builtin_ia32_vcvttsd2sis32, "iV2dIi", "ncV:128:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttsd2usis32, "UiV2dIi", "ncV:128:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttss2sis32, "iV4fIi", "ncV:128:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttss2usis32, "UiV4fIi", "ncV:128:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttpd2dqs128_mask, "V4iV2dV4iUc", "nV:128:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttpd2dqs256_round_mask, "V4iV4dV4iUcIi", "nV:256:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttpd2dqs512_round_mask, "V8iV8dV8iUcIi", "nV:512:", "avx10.2-512") +TARGET_BUILTIN(__builtin_ia32_vcvttpd2udqs128_mask, "V4iV2dV4iUc", "nV:128:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttpd2udqs256_round_mask, "V4iV4dV4iUcIi", "nV:256:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttpd2udqs512_round_mask, "V8iV8dV8iUcIi", "nV:512:", "avx10.2-512") +TARGET_BUILTIN(__builtin_ia32_vcvttpd2qqs128_mask, "V2OiV2dV2OiUc", "nV:128:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttpd2qqs256_round_mask, "V4OiV4dV4OiUcIi", "nV:256:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttpd2qqs512_round_mask, "V8OiV8dV8OiUcIi", "nV:512:", "avx10.2-512") +TARGET_BUILTIN(__builtin_ia32_vcvttpd2uqqs128_mask, "V2OiV2dV2OiUc", "nV:128:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttpd2uqqs256_round_mask, "V4OiV4dV4OiUcIi", "nV:256:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttpd2uqqs512_round_mask, "V8OiV8dV8OiUcIi", "nV:512:", "avx10.2-512") +TARGET_BUILTIN(__builtin_ia32_vcvttps2dqs128_mask, "V4iV4fV4iUc", "nV:128:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttps2dqs256_round_mask, "V8iV8fV8iUcIi", "nV:256:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttps2dqs512_round_mask, "V16iV16fV16iUsIi", "nV:512:", "avx10.2-512") +TARGET_BUILTIN(__builtin_ia32_vcvttps2udqs128_mask, "V4iV4fV4iUc", "nV:128:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttps2udqs256_round_mask, "V8iV8fV8iUcIi", "nV:256:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttps2udqs512_round_mask, "V16iV16fV16iUsIi", "nV:512:", "avx10.2-512") 
+TARGET_BUILTIN(__builtin_ia32_vcvttps2qqs128_mask, "V2OiV4fV2OiUc", "nV:128:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttps2qqs256_round_mask, "V4OiV4fV4OiUcIi", "nV:256:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttps2qqs512_round_mask, "V8OiV8fV8OiUcIi", "nV:512:", "avx10.2-512") +TARGET_BUILTIN(__builtin_ia32_vcvttps2uqqs128_mask, "V2OiV4fV2OiUc", "nV:128:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttps2uqqs256_round_mask, "V4OiV4fV4OiUcIi", "nV:256:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttps2uqqs512_round_mask, "V8OiV8fV8OiUcIi", "nV:512:", "avx10.2-512") + // AVX-NE-CONVERT TARGET_BUILTIN(__builtin_ia32_vbcstnebf162ps128, "V4fyC*", "nV:128:", "avxneconvert") TARGET_BUILTIN(__builtin_ia32_vbcstnebf162ps256, "V8fyC*", "nV:256:", "avxneconvert") diff --git a/clang/include/clang/Basic/BuiltinsX86_64.def b/clang/include/clang/Basic/BuiltinsX86_64.def index 5e00916..db381aa 100644 --- a/clang/include/clang/Basic/BuiltinsX86_64.def +++ b/clang/include/clang/Basic/BuiltinsX86_64.def @@ -99,6 +99,12 @@ TARGET_BUILTIN(__builtin_ia32_vcvttsh2si64, "OiV8xIi", "ncV:128:", "avx512fp16") TARGET_BUILTIN(__builtin_ia32_vcvttsh2usi64, "UOiV8xIi", "ncV:128:", "avx512fp16") TARGET_BUILTIN(__builtin_ia32_directstore_u64, "vULi*ULi", "n", "movdiri") +// AVX10.2 SATCVT-DS +TARGET_BUILTIN(__builtin_ia32_vcvttsd2sis64, "OiV2dIi", "ncV:128:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttsd2usis64, "UOiV2dIi", "ncV:128:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttss2sis64, "OiV4fIi", "ncV:128:", "avx10.2-256") +TARGET_BUILTIN(__builtin_ia32_vcvttss2usis64, "UOiV4fIi", "ncV:128:", "avx10.2-256") + // UINTR TARGET_BUILTIN(__builtin_ia32_clui, "v", "n", "uintr") TARGET_BUILTIN(__builtin_ia32_stui, "v", "n", "uintr") diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp index 3ba9732..265350e 100644 --- a/clang/lib/AST/ByteCode/Compiler.cpp +++ b/clang/lib/AST/ByteCode/Compiler.cpp @@ -3335,7 +3335,11 @@ bool Compiler<Emitter>::VisitObjCBoxedExpr(const ObjCBoxedExpr *E) { if (!E->isExpressibleAsConstantInitializer()) return this->discard(SubExpr) && this->emitInvalid(E); - return this->delegate(SubExpr); + assert(classifyPrim(E) == PT_Ptr); + if (std::optional<unsigned> I = P.getOrCreateDummy(E)) + return this->emitGetPtrGlobal(*I, E); + + return false; } template <class Emitter> @@ -4118,7 +4122,7 @@ bool Compiler<Emitter>::VisitBuiltinCallExpr(const CallExpr *E, BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString || BuiltinID == Builtin::BI__builtin_ptrauth_sign_constant || BuiltinID == Builtin::BI__builtin_function_start) { - if (std::optional<unsigned> GlobalOffset = P.createGlobal(E)) { + if (std::optional<unsigned> GlobalOffset = P.getOrCreateDummy(E)) { if (!this->emitGetPtrGlobal(*GlobalOffset, E)) return false; diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp index 2fa8b40..827a177 100644 --- a/clang/lib/AST/ByteCode/Interp.cpp +++ b/clang/lib/AST/ByteCode/Interp.cpp @@ -883,7 +883,7 @@ bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr, return diagnoseUnknownDecl(S, OpPC, D); assert(AK == AK_Assign); - if (S.getLangOpts().CPlusPlus11) { + if (S.getLangOpts().CPlusPlus14) { const SourceInfo &E = S.Current->getSource(OpPC); S.FFDiag(E, diag::note_constexpr_modify_global); } diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h index 7b7c782..3d507e2 100644 --- a/clang/lib/AST/ByteCode/Interp.h +++ b/clang/lib/AST/ByteCode/Interp.h @@ 
-1611,6 +1611,12 @@ inline bool GetPtrDerivedPop(InterpState &S, CodePtr OpPC, uint32_t Off) { inline bool GetPtrBase(InterpState &S, CodePtr OpPC, uint32_t Off) { const Pointer &Ptr = S.Stk.peek<Pointer>(); + + if (!Ptr.isBlockPointer()) { + S.Stk.push<Pointer>(Ptr.asIntPointer().baseCast(S.getASTContext(), Off)); + return true; + } + if (!CheckNull(S, OpPC, Ptr, CSK_Base)) return false; if (!CheckSubobject(S, OpPC, Ptr, CSK_Base)) @@ -1624,6 +1630,12 @@ inline bool GetPtrBase(InterpState &S, CodePtr OpPC, uint32_t Off) { inline bool GetPtrBasePop(InterpState &S, CodePtr OpPC, uint32_t Off) { const Pointer &Ptr = S.Stk.pop<Pointer>(); + + if (!Ptr.isBlockPointer()) { + S.Stk.push<Pointer>(Ptr.asIntPointer().baseCast(S.getASTContext(), Off)); + return true; + } + if (!CheckNull(S, OpPC, Ptr, CSK_Base)) return false; if (!CheckSubobject(S, OpPC, Ptr, CSK_Base)) diff --git a/clang/lib/AST/ByteCode/Pointer.cpp b/clang/lib/AST/ByteCode/Pointer.cpp index 9eaf0db..282953e 100644 --- a/clang/lib/AST/ByteCode/Pointer.cpp +++ b/clang/lib/AST/ByteCode/Pointer.cpp @@ -667,3 +667,26 @@ IntPointer IntPointer::atOffset(const ASTContext &ASTCtx, .getQuantity(); return IntPointer{this->Desc, this->Value + FieldOffset}; } + +IntPointer IntPointer::baseCast(const ASTContext &ASTCtx, + unsigned BaseOffset) const { + const Record *R = Desc->ElemRecord; + const Descriptor *BaseDesc = nullptr; + + // This iterates over bases and checks for the proper offset. That's + // potentially slow but this case really shouldn't happen a lot. + for (const Record::Base &B : R->bases()) { + if (B.Offset == BaseOffset) { + BaseDesc = B.Desc; + break; + } + } + assert(BaseDesc); + + // Adjust the offset value based on the information from the record layout. + const ASTRecordLayout &Layout = ASTCtx.getASTRecordLayout(R->getDecl()); + CharUnits BaseLayoutOffset = + Layout.getBaseClassOffset(cast<CXXRecordDecl>(BaseDesc->asDecl())); + + return {BaseDesc, Value + BaseLayoutOffset.getQuantity()}; +} diff --git a/clang/lib/AST/ByteCode/Pointer.h b/clang/lib/AST/ByteCode/Pointer.h index acbef43..ac9b9ed 100644 --- a/clang/lib/AST/ByteCode/Pointer.h +++ b/clang/lib/AST/ByteCode/Pointer.h @@ -46,6 +46,7 @@ struct IntPointer { uint64_t Value; IntPointer atOffset(const ASTContext &ASTCtx, unsigned Offset) const; + IntPointer baseCast(const ASTContext &ASTCtx, unsigned BaseOffset) const; }; enum class Storage { Block, Int, Fn }; diff --git a/clang/lib/AST/ByteCode/Program.cpp b/clang/lib/AST/ByteCode/Program.cpp index a4f0df8..bd5860b 100644 --- a/clang/lib/AST/ByteCode/Program.cpp +++ b/clang/lib/AST/ByteCode/Program.cpp @@ -144,22 +144,33 @@ std::optional<unsigned> Program::getOrCreateGlobal(const ValueDecl *VD, return std::nullopt; } -std::optional<unsigned> Program::getOrCreateDummy(const ValueDecl *VD) { +std::optional<unsigned> Program::getOrCreateDummy(const DeclTy &D) { + assert(D); // Dedup blocks since they are immutable and pointers cannot be compared. 
- if (auto It = DummyVariables.find(VD); It != DummyVariables.end()) + if (auto It = DummyVariables.find(D.getOpaqueValue()); + It != DummyVariables.end()) return It->second; - QualType QT = VD->getType(); - if (const auto *RT = QT->getAs<ReferenceType>()) - QT = RT->getPointeeType(); + QualType QT; + if (const auto *E = D.dyn_cast<const Expr *>()) { + QT = E->getType(); + } else { + const ValueDecl *VD = cast<ValueDecl>(D.get<const Decl *>()); + QT = VD->getType(); + if (const auto *RT = QT->getAs<ReferenceType>()) + QT = RT->getPointeeType(); + } + assert(!QT.isNull()); Descriptor *Desc; if (std::optional<PrimType> T = Ctx.classify(QT)) - Desc = createDescriptor(VD, *T, std::nullopt, true, false); + Desc = createDescriptor(D, *T, std::nullopt, /*IsTemporary=*/true, + /*IsMutable=*/false); else - Desc = createDescriptor(VD, QT.getTypePtr(), std::nullopt, true, false); + Desc = createDescriptor(D, QT.getTypePtr(), std::nullopt, + /*IsTemporary=*/true, /*IsMutable=*/false); if (!Desc) - Desc = allocateDescriptor(VD); + Desc = allocateDescriptor(D); assert(Desc); Desc->makeDummy(); @@ -175,7 +186,7 @@ std::optional<unsigned> Program::getOrCreateDummy(const ValueDecl *VD) { G->block()->invokeCtor(); Globals.push_back(G); - DummyVariables[VD] = I; + DummyVariables[D.getOpaqueValue()] = I; return I; } diff --git a/clang/lib/AST/ByteCode/Program.h b/clang/lib/AST/ByteCode/Program.h index 7f69d97..bd2672a 100644 --- a/clang/lib/AST/ByteCode/Program.h +++ b/clang/lib/AST/ByteCode/Program.h @@ -84,7 +84,7 @@ public: const Expr *Init = nullptr); /// Returns or creates a dummy value for unknown declarations. - std::optional<unsigned> getOrCreateDummy(const ValueDecl *VD); + std::optional<unsigned> getOrCreateDummy(const DeclTy &D); /// Creates a global and returns its index. std::optional<unsigned> createGlobal(const ValueDecl *VD, const Expr *Init); @@ -209,7 +209,7 @@ private: llvm::DenseMap<const RecordDecl *, Record *> Records; /// Dummy parameter to generate pointers from. - llvm::DenseMap<const ValueDecl *, unsigned> DummyVariables; + llvm::DenseMap<const void *, unsigned> DummyVariables; /// Creates a new descriptor. template <typename... 
Ts> Descriptor *allocateDescriptor(Ts &&...Args) { diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index 9950c06..27abeba 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -18861,6 +18861,16 @@ case Builtin::BI__builtin_hlsl_elementwise_isinf: { return SelectVal; } + case Builtin::BI__builtin_hlsl_step: { + Value *Op0 = EmitScalarExpr(E->getArg(0)); + Value *Op1 = EmitScalarExpr(E->getArg(1)); + assert(E->getArg(0)->getType()->hasFloatingRepresentation() && + E->getArg(1)->getType()->hasFloatingRepresentation() && + "step operands must have a float representation"); + return Builder.CreateIntrinsic( + /*ReturnType=*/Op0->getType(), CGM.getHLSLRuntime().getStepIntrinsic(), + ArrayRef<Value *>{Op0, Op1}, nullptr, "hlsl.step"); + } case Builtin::BI__builtin_hlsl_wave_get_lane_index: { return EmitRuntimeCall(CGM.CreateRuntimeFunction( llvm::FunctionType::get(IntTy, {}, false), "__hlsl_wave_get_lane_index", diff --git a/clang/lib/CodeGen/CGHLSLRuntime.h b/clang/lib/CodeGen/CGHLSLRuntime.h index 6e22680..a8aabca 100644 --- a/clang/lib/CodeGen/CGHLSLRuntime.h +++ b/clang/lib/CodeGen/CGHLSLRuntime.h @@ -81,6 +81,7 @@ public: GENERATE_HLSL_INTRINSIC_FUNCTION(Rsqrt, rsqrt) GENERATE_HLSL_INTRINSIC_FUNCTION(Saturate, saturate) GENERATE_HLSL_INTRINSIC_FUNCTION(Sign, sign) + GENERATE_HLSL_INTRINSIC_FUNCTION(Step, step) GENERATE_HLSL_INTRINSIC_FUNCTION(ThreadId, thread_id) GENERATE_HLSL_INTRINSIC_FUNCTION(FDot, fdot) GENERATE_HLSL_INTRINSIC_FUNCTION(SDot, sdot) diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt index e928b5b..a21e390 100644 --- a/clang/lib/Headers/CMakeLists.txt +++ b/clang/lib/Headers/CMakeLists.txt @@ -151,11 +151,13 @@ set(x86_files avx10_2_512convertintrin.h avx10_2_512minmaxintrin.h avx10_2_512niintrin.h + avx10_2_512satcvtdsintrin.h avx10_2_512satcvtintrin.h avx10_2bf16intrin.h avx10_2convertintrin.h avx10_2minmaxintrin.h avx10_2niintrin.h + avx10_2satcvtdsintrin.h avx10_2satcvtintrin.h avx2intrin.h avx512bf16intrin.h diff --git a/clang/lib/Headers/avx10_2_512satcvtdsintrin.h b/clang/lib/Headers/avx10_2_512satcvtdsintrin.h new file mode 100644 index 0000000..5970ab0 --- /dev/null +++ b/clang/lib/Headers/avx10_2_512satcvtdsintrin.h @@ -0,0 +1,303 @@ +/*===----- avx10_2_512satcvtdsintrin.h - AVX10_2_512SATCVTDS intrinsics ----=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ +#ifndef __IMMINTRIN_H +#error \ + "Never use <avx10_2_512satcvtdsintrin.h> directly; include <immintrin.h> instead." +#endif + +#ifndef __AVX10_2_512SATCVTDSINTRIN_H +#define __AVX10_2_512SATCVTDSINTRIN_H + +/* Define the default attributes for the functions in this file. 
*/ +#define __DEFAULT_FN_ATTRS \ + __attribute__((__always_inline__, __nodebug__, __target__("avx10.2-512"), \ + __min_vector_width__(512))) + +// 512 bit : Double -> Int +static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_cvttspd_epi32(__m512d __A) { + return ((__m256i)__builtin_ia32_vcvttpd2dqs512_round_mask( + (__v8df)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_mask_cvttspd_epi32(__m256i __W, __mmask8 __U, __m512d __A) { + return ((__m256i)__builtin_ia32_vcvttpd2dqs512_round_mask( + (__v8df)__A, (__v8si)__W, __U, _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_maskz_cvttspd_epi32(__mmask8 __U, __m512d __A) { + return ((__m256i)__builtin_ia32_vcvttpd2dqs512_round_mask( + (__v8df)__A, (__v8si)_mm256_setzero_si256(), __U, + _MM_FROUND_CUR_DIRECTION)); +} + +#define _mm512_cvtts_roundpd_epi32(__A, __R) \ + ((__m256i)__builtin_ia32_vcvttpd2dqs512_round_mask( \ + (__v8df)(__m512d)(__A), (__v8si)_mm256_undefined_si256(), \ + (__mmask8) - 1, (const int)(__R))) + +#define _mm512_mask_cvtts_roundpd_epi32(__W, __U, __A, __R) \ + ((__m256i)__builtin_ia32_vcvttpd2dqs512_round_mask( \ + (__v8df)(__m512d)(__A), (__v8si)(__m256i)(__W), (__mmask8)(__U), \ + (const int)(__R))) + +#define _mm512_maskz_cvtts_roundpd_epi32(__U, __A, __R) \ + ((__m256i)__builtin_ia32_vcvttpd2dqs512_round_mask( \ + (__v8df)(__m512d)(__A), (__v8si)_mm256_setzero_si256(), (__mmask8)(__U), \ + (const int)(__R))) + +// 512 bit : Double -> uInt +static __inline__ __m256i __DEFAULT_FN_ATTRS _mm512_cvttspd_epu32(__m512d __A) { + return ((__m256i)__builtin_ia32_vcvttpd2udqs512_round_mask( + (__v8df)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_mask_cvttspd_epu32(__m256i __W, __mmask8 __U, __m512d __A) { + return ((__m256i)__builtin_ia32_vcvttpd2udqs512_round_mask( + (__v8df)__A, (__v8si)__W, __U, _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS +_mm512_maskz_cvttspd_epu32(__mmask8 __U, __m512d __A) { + return ((__m256i)__builtin_ia32_vcvttpd2udqs512_round_mask( + (__v8df)__A, (__v8si)_mm256_setzero_si256(), __U, + _MM_FROUND_CUR_DIRECTION)); +} + +#define _mm512_cvtts_roundpd_epu32(__A, __R) \ + ((__m256i)__builtin_ia32_vcvttpd2udqs512_round_mask( \ + (__v8df)(__m512d)(__A), (__v8si)_mm256_undefined_si256(), \ + (__mmask8) - 1, (const int)(__R))) + +#define _mm512_mask_cvtts_roundpd_epu32(__W, __U, __A, __R) \ + ((__m256i)__builtin_ia32_vcvttpd2udqs512_round_mask( \ + (__v8df)(__m512d)(__A), (__v8si)(__m256i)(__W), (__mmask8)(__U), \ + (const int)(__R))) + +#define _mm512_maskz_cvtts_roundpd_epu32(__U, __A, __R) \ + ((__m256i)__builtin_ia32_vcvttpd2udqs512_round_mask( \ + (__v8df)(__m512d)(__A), (__v8si)_mm256_setzero_si256(), (__mmask8)(__U), \ + (const int)(__R))) + +// 512 bit : Double -> Long + +static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvttspd_epi64(__m512d __A) { + return ((__m512i)__builtin_ia32_vcvttpd2qqs512_round_mask( + (__v8df)__A, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION)); +} +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvttspd_epi64(__m512i __W, __mmask8 __U, __m512d __A) { + return ((__m512i)__builtin_ia32_vcvttpd2qqs512_round_mask( + (__v8df)__A, (__v8di)__W, __U, _MM_FROUND_CUR_DIRECTION)); +} +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvttspd_epi64(__mmask8 __U, __m512d __A) { + 
return ((__m512i)__builtin_ia32_vcvttpd2qqs512_round_mask( + (__v8df)__A, (__v8di)_mm512_setzero_si512(), __U, + _MM_FROUND_CUR_DIRECTION)); +} + +#define _mm512_cvtts_roundpd_epi64(__A, __R) \ + ((__m512i)__builtin_ia32_vcvttpd2qqs512_round_mask( \ + (__v8df)(__m512d)(__A), (__v8di)_mm512_undefined_epi32(), \ + (__mmask8) - 1, (const int)(__R))) + +#define _mm512_mask_cvtts_roundpd_epi64(__W, __U, __A, __R) \ + ((__m512i)__builtin_ia32_vcvttpd2qqs512_round_mask( \ + (__v8df)(__m512d)(__A), (__v8di)(__m512i)(__W), (__mmask8)(__U), \ + (const int)(__R))) + +#define _mm512_maskz_cvtts_roundpd_epi64(__U, __A, __R) \ + ((__m512i)__builtin_ia32_vcvttpd2qqs512_round_mask( \ + (__v8df)(__m512d)(__A), (__v8di)_mm512_setzero_si512(), (__mmask8)(__U), \ + (const int)(__R))) + +// 512 bit : Double -> ULong + +static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvttspd_epu64(__m512d __A) { + return ((__m512i)__builtin_ia32_vcvttpd2uqqs512_round_mask( + (__v8df)__A, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvttspd_epu64(__m512i __W, __mmask8 __U, __m512d __A) { + return ((__m512i)__builtin_ia32_vcvttpd2uqqs512_round_mask( + (__v8df)__A, (__v8di)__W, __U, _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvttspd_epu64(__mmask8 __U, __m512d __A) { + return ((__m512i)__builtin_ia32_vcvttpd2uqqs512_round_mask( + (__v8df)__A, (__v8di)_mm512_setzero_si512(), __U, + _MM_FROUND_CUR_DIRECTION)); +} + +#define _mm512_cvtts_roundpd_epu64(__A, __R) \ + ((__m512i)__builtin_ia32_vcvttpd2uqqs512_round_mask( \ + (__v8df)(__m512d)(__A), (__v8di)_mm512_undefined_epi32(), \ + (__mmask8) - 1, (const int)(__R))) + +#define _mm512_mask_cvtts_roundpd_epu64(__W, __U, __A, __R) \ + ((__m512i)__builtin_ia32_vcvttpd2uqqs512_round_mask( \ + (__v8df)(__m512d)(__A), (__v8di)(__m512i)(__W), (__mmask8)(__U), \ + (const int)(__R))) + +#define _mm512_maskz_cvtts_roundpd_epu64(__U, __A, __R) \ + ((__m512i)__builtin_ia32_vcvttpd2uqqs512_round_mask( \ + (__v8df)(__m512d)(__A), (__v8di)_mm512_setzero_si512(), (__mmask8)(__U), \ + (const int)(__R))) + +// 512 bit: Float -> int +static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvttsps_epi32(__m512 __A) { + return ((__m512i)__builtin_ia32_vcvttps2dqs512_round_mask( + (__v16sf)(__A), (__v16si)_mm512_undefined_epi32(), (__mmask16)-1, + _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvttsps_epi32(__m512i __W, __mmask16 __U, __m512 __A) { + return ((__m512i)__builtin_ia32_vcvttps2dqs512_round_mask( + (__v16sf)(__A), (__v16si)(__W), __U, _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvttsps_epi32(__mmask16 __U, __m512 __A) { + return ((__m512i)__builtin_ia32_vcvttps2dqs512_round_mask( + (__v16sf)(__A), (__v16si)_mm512_setzero_si512(), __U, + _MM_FROUND_CUR_DIRECTION)); +} + +#define _mm512_cvtts_roundps_epi32(__A, __R) \ + ((__m512i)__builtin_ia32_vcvttps2dqs512_round_mask( \ + (__v16sf)(__m512)(__A), (__v16si)_mm512_undefined_epi32(), \ + (__mmask16) - 1, (const int)(__R))) + +#define _mm512_mask_cvtts_roundps_epi32(__W, __U, __A, __R) \ + ((__m512i)__builtin_ia32_vcvttps2dqs512_round_mask( \ + (__v16sf)(__m512)(__A), (__v16si)(__m512i)(__W), (__mmask16)(__U), \ + (const int)(__R))) + +#define _mm512_maskz_cvtts_roundps_epi32(__U, __A, __R) \ + ((__m512i)__builtin_ia32_vcvttps2dqs512_round_mask( \ + (__v16sf)(__m512)(__A), (__v16si)_mm512_setzero_si512(), \ + 
(__mmask16)(__U), (const int)(__R))) + +// 512 bit: Float -> uint +static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvttsps_epu32(__m512 __A) { + return ((__m512i)__builtin_ia32_vcvttps2udqs512_round_mask( + (__v16sf)(__A), (__v16si)_mm512_undefined_epi32(), (__mmask16)-1, + _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvttsps_epu32(__m512i __W, __mmask16 __U, __m512 __A) { + return ((__m512i)__builtin_ia32_vcvttps2udqs512_round_mask( + (__v16sf)(__A), (__v16si)(__W), __U, _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvttsps_epu32(__mmask16 __U, __m512 __A) { + return ((__m512i)__builtin_ia32_vcvttps2udqs512_round_mask( + (__v16sf)(__A), (__v16si)_mm512_setzero_si512(), __U, + _MM_FROUND_CUR_DIRECTION)); +} + +#define _mm512_cvtts_roundps_epu32(__A, __R) \ + ((__m512i)__builtin_ia32_vcvttps2udqs512_round_mask( \ + (__v16sf)(__m512)(__A), (__v16si)_mm512_undefined_epi32(), \ + (__mmask16) - 1, (const int)(__R))) + +#define _mm512_mask_cvtts_roundps_epu32(__W, __U, __A, __R) \ + ((__m512i)__builtin_ia32_vcvttps2udqs512_round_mask( \ + (__v16sf)(__m512)(__A), (__v16si)(__m512i)(__W), (__mmask16)(__U), \ + (const int)(__R))) + +#define _mm512_maskz_cvtts_roundps_epu32(__U, __A, __R) \ + ((__m512i)__builtin_ia32_vcvttps2udqs512_round_mask( \ + (__v16sf)(__m512)(__A), (__v16si)_mm512_setzero_si512(), \ + (__mmask16)(__U), (const int)(__R))) + +// 512 bit : float -> long +static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvttsps_epi64(__m256 __A) { + return ((__m512i)__builtin_ia32_vcvttps2qqs512_round_mask( + (__v8sf)__A, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvttsps_epi64(__m512i __W, __mmask8 __U, __m256 __A) { + return ((__m512i)__builtin_ia32_vcvttps2qqs512_round_mask( + (__v8sf)__A, (__v8di)__W, __U, _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvttsps_epi64(__mmask8 __U, __m256 __A) { + return ((__m512i)__builtin_ia32_vcvttps2qqs512_round_mask( + (__v8sf)__A, (__v8di)_mm512_setzero_si512(), __U, + _MM_FROUND_CUR_DIRECTION)); +} + +#define _mm512_cvtts_roundps_epi64(__A, __R) \ + ((__m512i)__builtin_ia32_vcvttps2qqs512_round_mask( \ + (__v8sf)(__m256)(__A), (__v8di)_mm512_undefined_epi32(), (__mmask8) - 1, \ + (const int)(__R))) + +#define _mm512_mask_cvtts_roundps_epi64(__W, __U, __A, __R) \ + ((__m512i)__builtin_ia32_vcvttps2qqs512_round_mask( \ + (__v8sf)(__m256)(__A), (__v8di)(__m512i)(__W), (__mmask8)(__U), \ + (const int)(__R))) + +#define _mm512_maskz_cvtts_roundps_epi64(__U, __A, __R) \ + ((__m512i)__builtin_ia32_vcvttps2qqs512_round_mask( \ + (__v8sf)(__m256)(__A), (__v8di)_mm512_setzero_si512(), (__mmask8)(__U), \ + (const int)(__R))) + +// 512 bit : float -> ulong +static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_cvttsps_epu64(__m256 __A) { + return ((__m512i)__builtin_ia32_vcvttps2uqqs512_round_mask( + (__v8sf)__A, (__v8di)_mm512_undefined_epi32(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_mask_cvttsps_epu64(__m512i __W, __mmask8 __U, __m256 __A) { + return ((__m512i)__builtin_ia32_vcvttps2uqqs512_round_mask( + (__v8sf)__A, (__v8di)__W, __U, _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m512i __DEFAULT_FN_ATTRS +_mm512_maskz_cvttsps_epu64(__mmask8 __U, __m256 __A) { + return ((__m512i)__builtin_ia32_vcvttps2uqqs512_round_mask( + (__v8sf)__A, 
(__v8di)_mm512_setzero_si512(), __U, + _MM_FROUND_CUR_DIRECTION)); +} + +#define _mm512_cvtts_roundps_epu64(__A, __R) \ + ((__m512i)__builtin_ia32_vcvttps2uqqs512_round_mask( \ + (__v8sf)(__m256)(__A), (__v8di)_mm512_undefined_epi32(), (__mmask8) - 1, \ + (const int)(__R))) + +#define _mm512_mask_cvtts_roundps_epu64(__W, __U, __A, __R) \ + ((__m512i)__builtin_ia32_vcvttps2uqqs512_round_mask( \ + (__v8sf)(__m256)(__A), (__v8di)(__m512i)(__W), (__mmask8)(__U), \ + (const int)(__R))) + +#define _mm512_maskz_cvtts_roundps_epu64(__U, __A, __R) \ + ((__m512i)__builtin_ia32_vcvttps2uqqs512_round_mask( \ + (__v8sf)(__m256)(__A), (__v8di)_mm512_setzero_si512(), (__mmask8)(__U), \ + (const int)(__R))) + +#undef __DEFAULT_FN_ATTRS +#endif // __AVX10_2_512SATCVTDSINTRIN_H diff --git a/clang/lib/Headers/avx10_2satcvtdsintrin.h b/clang/lib/Headers/avx10_2satcvtdsintrin.h new file mode 100644 index 0000000..5902843 --- /dev/null +++ b/clang/lib/Headers/avx10_2satcvtdsintrin.h @@ -0,0 +1,496 @@ +/*===----------- avx10_2satcvtdsintrin.h - AVX512SATCVTDS intrinsics --------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error \ + "Never use <avx10_2satcvtdsintrin.h> directly; include <immintrin.h> instead." +#endif // __IMMINTRIN_H + +#ifndef __AVX10_2SATCVTDSINTRIN_H +#define __AVX10_2SATCVTDSINTRIN_H + +/* Define the default attributes for the functions in this file. */ +#define __DEFAULT_FN_ATTRS256 \ + __attribute__((__always_inline__, __nodebug__, __target__("avx10.2-256"), \ + __min_vector_width__(256))) + +#define __DEFAULT_FN_ATTRS128 \ + __attribute__((__always_inline__, __nodebug__, __target__("avx10.2-256"), \ + __min_vector_width__(128))) + +#define _mm_cvtts_roundsd_i32(__A, __R) \ + ((int)__builtin_ia32_vcvttsd2sis32((__v2df)(__m128)(__A), (const int)(__R))) + +#define _mm_cvtts_roundsd_si32(__A, __R) \ + ((int)__builtin_ia32_vcvttsd2sis32((__v2df)(__m128d)(__A), (const int)(__R))) + +#define _mm_cvtts_roundsd_u32(__A, __R) \ + ((unsigned int)__builtin_ia32_vcvttsd2usis32((__v2df)(__m128d)(__A), \ + (const int)(__R))) + +#define _mm_cvtts_roundss_i32(__A, __R) \ + ((int)__builtin_ia32_vcvttss2sis32((__v4sf)(__m128)(__A), (const int)(__R))) + +#define _mm_cvtts_roundss_si32(__A, __R) \ + ((int)__builtin_ia32_vcvttss2sis32((__v4sf)(__m128)(__A), (const int)(__R))) + +#define _mm_cvtts_roundss_u32(__A, __R) \ + ((unsigned int)__builtin_ia32_vcvttss2usis32((__v4sf)(__m128)(__A), \ + (const int)(__R))) + +#ifdef __x86_64__ +#define _mm_cvtts_roundss_u64(__A, __R) \ + ((unsigned long long)__builtin_ia32_vcvttss2usis64((__v4sf)(__m128)(__A), \ + (const int)(__R))) + +#define _mm_cvtts_roundsd_u64(__A, __R) \ + ((unsigned long long)__builtin_ia32_vcvttsd2usis64((__v2df)(__m128d)(__A), \ + (const int)(__R))) + +#define _mm_cvtts_roundss_i64(__A, __R) \ + ((long long)__builtin_ia32_vcvttss2sis64((__v4sf)(__m128)(__A), \ + (const int)(__R))) + +#define _mm_cvtts_roundss_si64(__A, __R) \ + ((long long)__builtin_ia32_vcvttss2sis64((__v4sf)(__m128)(__A), \ + (const int)(__R))) + +#define _mm_cvtts_roundsd_si64(__A, __R) \ + ((long long)__builtin_ia32_vcvttsd2sis64((__v2df)(__m128d)(__A), \ + (const int)(__R))) + +#define _mm_cvtts_roundsd_i64(__A, __R) \ + ((long long)__builtin_ia32_vcvttsd2sis64((__v2df)(__m128d)(__A), \ + (const 
int)(__R))) +#endif /* __x86_64__ */ + +// 128 Bit : Double -> int +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttspd_epi32(__m128d __A) { + return ((__m128i)__builtin_ia32_vcvttpd2dqs128_mask( + (__v2df)__A, (__v4si)(__m128i)_mm_undefined_si128(), (__mmask8)(-1))); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttspd_epi32(__m128i __W, __mmask8 __U, __m128d __A) { + return ((__m128i)__builtin_ia32_vcvttpd2dqs128_mask((__v2df)__A, (__v4si)__W, + __U)); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttspd_epi32(__mmask16 __U, __m128d __A) { + return ((__m128i)__builtin_ia32_vcvttpd2dqs128_mask( + (__v2df)__A, (__v4si)(__m128i)_mm_setzero_si128(), __U)); +} + +// 256 Bit : Double -> int +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvttspd_epi32(__m256d __A) { + return ((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask( + (__v4df)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttspd_epi32(__m128i __W, __mmask8 __U, __m256d __A) { + return ((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask( + (__v4df)__A, (__v4si)__W, __U, _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttspd_epi32(__mmask8 __U, __m256d __A) { + return ((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask( + (__v4df)__A, (__v4si)_mm_setzero_si128(), __U, _MM_FROUND_CUR_DIRECTION)); +} + +#define _mm256_cvtts_roundpd_epi32(__A, __R) \ + ((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask( \ + (__v4df)(__m256d)__A, (__v4si)(__m128i)_mm_undefined_si128(), \ + (__mmask8) - 1, (int)(__R))) + +#define _mm256_mask_cvtts_roundpd_epi32(__W, __U, __A, __R) \ + ((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask( \ + (__v4df)(__m256d)__A, (__v4si)(__m128i)__W, (__mmask8)__U, (int)(__R))) + +#define _mm256_maskz_cvtts_roundpd_epi32(__U, __A, __R) \ + ((__m128i)__builtin_ia32_vcvttpd2dqs256_round_mask( \ + (__v4df)(__m256d)__A, (__v4si)(__m128i)_mm_setzero_si128(), \ + (__mmask8)__U, (int)(__R))) + +// 128 Bit : Double -> uint +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttspd_epu32(__m128d __A) { + return ((__m128i)__builtin_ia32_vcvttpd2udqs128_mask( + (__v2df)__A, (__v4si)(__m128i)_mm_undefined_si128(), (__mmask8)(-1))); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttspd_epu32(__m128i __W, __mmask8 __U, __m128d __A) { + return ((__m128i)__builtin_ia32_vcvttpd2udqs128_mask( + (__v2df)__A, (__v4si)(__m128i)__W, (__mmask8)__U)); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttspd_epu32(__mmask8 __U, __m128d __A) { + return ((__m128i)__builtin_ia32_vcvttpd2udqs128_mask( + (__v2df)__A, (__v4si)(__m128i)_mm_setzero_si128(), __U)); +} + +// 256 Bit : Double -> uint +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_cvttspd_epu32(__m256d __A) { + return ((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask( + (__v4df)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttspd_epu32(__m128i __W, __mmask8 __U, __m256d __A) { + return ((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask( + (__v4df)__A, (__v4si)__W, __U, _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttspd_epu32(__mmask8 __U, __m256d __A) { + return ((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask( + (__v4df)__A, (__v4si)_mm_setzero_si128(), __U, _MM_FROUND_CUR_DIRECTION)); +} + 
+#define _mm256_cvtts_roundpd_epu32(__A, __R) \ + ((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask( \ + (__v4df)(__m256d)__A, (__v4si)(__m128i)_mm_undefined_si128(), \ + (__mmask8) - 1, (int)(__R))) + +#define _mm256_mask_cvtts_roundpd_epu32(__W, __U, __A, __R) \ + ((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask( \ + (__v4df)(__m256d)__A, (__v4si)(__m128i)__W, (__mmask8)__U, (int)(__R))) + +#define _mm256_maskz_cvtts_roundpd_epu32(__U, __A, __R) \ + ((__m128i)__builtin_ia32_vcvttpd2udqs256_round_mask( \ + (__v4df)(__m256d)__A, (__v4si)(__m128i)_mm_setzero_si128(), \ + (__mmask8)__U, (int)(__R))) + +// 128 Bit : Double -> long +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttspd_epi64(__m128d __A) { + return ((__m128i)__builtin_ia32_vcvttpd2qqs128_mask( + (__v2df)__A, (__v2di)_mm_undefined_si128(), (__mmask8)-1)); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttspd_epi64(__m128i __W, __mmask8 __U, __m128d __A) { + return ((__m128i)__builtin_ia32_vcvttpd2qqs128_mask((__v2df)__A, (__v2di)__W, + (__mmask8)__U)); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttspd_epi64(__mmask8 __U, __m128d __A) { + return ((__m128i)__builtin_ia32_vcvttpd2qqs128_mask( + (__v2df)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U)); +} + +// 256 Bit : Double -> long +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvttspd_epi64(__m256d __A) { + return ((__m256i)__builtin_ia32_vcvttpd2qqs256_round_mask( + (__v4df)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttspd_epi64(__m256i __W, __mmask8 __U, __m256d __A) { + return ((__m256i)__builtin_ia32_vcvttpd2qqs256_round_mask( + (__v4df)__A, (__v4di)__W, __U, _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttspd_epi64(__mmask8 __U, __m256d __A) { + return ((__m256i)__builtin_ia32_vcvttpd2qqs256_round_mask( + (__v4df)__A, (__v4di)_mm256_setzero_si256(), __U, + _MM_FROUND_CUR_DIRECTION)); +} + +#define _mm256_cvtts_roundpd_epi64(__A, __R) \ + ((__m256i)__builtin_ia32_vcvttpd2qqs256_round_mask( \ + (__v4df)__A, (__v4di)_mm256_undefined_si256(), (__mmask8) - 1, \ + (int)__R)) + +#define _mm256_mask_cvtts_roundpd_epi64(__W, __U, __A, __R) \ + ((__m256i)__builtin_ia32_vcvttpd2qqs256_round_mask((__v4df)__A, (__v4di)__W, \ + (__mmask8)__U, (int)__R)) + +#define _mm256_maskz_cvtts_roundpd_epi64(__U, __A, __R) \ + ((__m256i)__builtin_ia32_vcvttpd2qqs256_round_mask( \ + (__v4df)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U, (int)__R)) + +// 128 Bit : Double -> ulong +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttspd_epu64(__m128d __A) { + return ((__m128i)__builtin_ia32_vcvttpd2uqqs128_mask( + (__v2df)__A, (__v2di)_mm_undefined_si128(), (__mmask8)-1)); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttspd_epu64(__m128i __W, __mmask8 __U, __m128d __A) { + return ((__m128i)__builtin_ia32_vcvttpd2uqqs128_mask((__v2df)__A, (__v2di)__W, + (__mmask8)__U)); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttspd_epu64(__mmask8 __U, __m128d __A) { + return ((__m128i)__builtin_ia32_vcvttpd2uqqs128_mask( + (__v2df)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U)); +} + +// 256 Bit : Double -> ulong + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvttspd_epu64(__m256d __A) { + return ((__m256i)__builtin_ia32_vcvttpd2uqqs256_round_mask( + (__v4df)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1, + 
_MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttspd_epu64(__m256i __W, __mmask8 __U, __m256d __A) { + return ((__m256i)__builtin_ia32_vcvttpd2uqqs256_round_mask( + (__v4df)__A, (__v4di)__W, __U, _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttspd_epu64(__mmask8 __U, __m256d __A) { + return ((__m256i)__builtin_ia32_vcvttpd2uqqs256_round_mask( + (__v4df)__A, (__v4di)_mm256_setzero_si256(), __U, + _MM_FROUND_CUR_DIRECTION)); +} + +#define _mm256_cvtts_roundpd_epu64(__A, __R) \ + ((__m256i)__builtin_ia32_vcvttpd2uqqs256_round_mask( \ + (__v4df)__A, (__v4di)_mm256_undefined_si256(), (__mmask8) - 1, \ + (int)__R)) + +#define _mm256_mask_cvtts_roundpd_epu64(__W, __U, __A, __R) \ + ((__m256i)__builtin_ia32_vcvttpd2uqqs256_round_mask( \ + (__v4df)__A, (__v4di)__W, (__mmask8)__U, (int)__R)) + +#define _mm256_maskz_cvtts_roundpd_epu64(__U, __A, __R) \ + ((__m256i)__builtin_ia32_vcvttpd2uqqs256_round_mask( \ + (__v4df)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U, (int)__R)) + +// 128 Bit : float -> int +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttsps_epi32(__m128 __A) { + return ((__m128i)__builtin_ia32_vcvttps2dqs128_mask( + (__v4sf)__A, (__v4si)(__m128i)_mm_undefined_si128(), (__mmask8)(-1))); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttsps_epi32(__m128i __W, __mmask8 __U, __m128 __A) { + return ((__m128i)__builtin_ia32_vcvttps2dqs128_mask((__v4sf)__A, (__v4si)__W, + (__mmask8)__U)); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttsps_epi32(__mmask8 __U, __m128 __A) { + return ((__m128i)__builtin_ia32_vcvttps2dqs128_mask( + (__v4sf)__A, (__v4si)(__m128i)_mm_setzero_si128(), (__mmask8)__U)); +} + +// 256 Bit : float -> int +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvttsps_epi32(__m256 __A) { + return ((__m256i)__builtin_ia32_vcvttps2dqs256_round_mask( + (__v8sf)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttsps_epi32(__m256i __W, __mmask8 __U, __m256 __A) { + return ((__m256i)__builtin_ia32_vcvttps2dqs256_round_mask( + (__v8sf)__A, (__v8si)__W, __U, _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttsps_epi32(__mmask8 __U, __m256 __A) { + return ((__m256i)__builtin_ia32_vcvttps2dqs256_round_mask( + (__v8sf)__A, (__v8si)_mm256_setzero_si256(), __U, + _MM_FROUND_CUR_DIRECTION)); +} + +#define _mm256_cvtts_roundps_epi32(__A, __R) \ + ((__m256i)__builtin_ia32_vcvttps2dqs256_round_mask( \ + (__v8sf)(__m256)__A, (__v8si)(__m256i)_mm256_undefined_si256(), \ + (__mmask8) - 1, (int)(__R))) + +#define _mm256_mask_cvtts_roundps_epi32(__W, __U, __A, __R) \ + ((__m256i)__builtin_ia32_vcvttps2dqs256_round_mask( \ + (__v8sf)(__m256)__A, (__v8si)(__m256i)__W, (__mmask8)__U, (int)(__R))) + +#define _mm256_maskz_cvtts_roundps_epi32(__U, __A, __R) \ + ((__m256i)__builtin_ia32_vcvttps2dqs256_round_mask( \ + (__v8sf)(__m256)__A, (__v8si)(__m256i)_mm256_setzero_si256(), \ + (__mmask8)__U, (int)(__R))) + +// 128 Bit : float -> uint +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttsps_epu32(__m128 __A) { + return ((__m128i)__builtin_ia32_vcvttps2udqs128_mask( + (__v4sf)__A, (__v4si)(__m128i)_mm_undefined_si128(), (__mmask8)(-1))); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttsps_epu32(__m128i __W, __mmask8 __U, __m128 __A) { + return 
((__m128i)__builtin_ia32_vcvttps2udqs128_mask((__v4sf)__A, (__v4si)__W, + (__mmask8)__U)); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttsps_epu32(__mmask8 __U, __m128 __A) { + return ((__m128i)__builtin_ia32_vcvttps2udqs128_mask( + (__v4sf)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U)); +} + +// 256 Bit : float -> uint + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvttsps_epu32(__m256 __A) { + return ((__m256i)__builtin_ia32_vcvttps2udqs256_round_mask( + (__v8sf)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttsps_epu32(__m256i __W, __mmask8 __U, __m256 __A) { + return ((__m256i)__builtin_ia32_vcvttps2udqs256_round_mask( + (__v8sf)__A, (__v8si)__W, __U, _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttsps_epu32(__mmask8 __U, __m256 __A) { + return ((__m256i)__builtin_ia32_vcvttps2udqs256_round_mask( + (__v8sf)__A, (__v8si)_mm256_setzero_si256(), __U, + _MM_FROUND_CUR_DIRECTION)); +} + +#define _mm256_cvtts_roundps_epu32(__A, __R) \ + ((__m256i)__builtin_ia32_vcvttps2udqs256_round_mask( \ + (__v8sf)(__m256)__A, (__v8si)(__m256i)_mm256_undefined_si256(), \ + (__mmask8) - 1, (int)(__R))) + +#define _mm256_mask_cvtts_roundps_epu32(__W, __U, __A, __R) \ + ((__m256i)__builtin_ia32_vcvttps2udqs256_round_mask( \ + (__v8sf)(__m256)__A, (__v8si)(__m256i)__W, (__mmask8)__U, (int)(__R))) + +#define _mm256_maskz_cvtts_roundps_epu32(__U, __A, __R) \ + ((__m256i)__builtin_ia32_vcvttps2udqs256_round_mask( \ + (__v8sf)(__m256)__A, (__v8si)(__m256i)_mm256_setzero_si256(), \ + (__mmask8)__U, (int)(__R))) + +// 128 bit : float -> long +static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttsps_epi64(__m128 __A) { + return ((__m128i)__builtin_ia32_vcvttps2qqs128_mask( + (__v4sf)__A, (__v2di)_mm_undefined_si128(), (__mmask8)-1)); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_mask_cvttsps_epi64(__m128i __W, __mmask8 __U, __m128 __A) { + return ((__m128i)__builtin_ia32_vcvttps2qqs128_mask( + (__v4sf)__A, (__v2di)(__m128i)__W, (__mmask8)__U)); +} + +static __inline__ __m128i __DEFAULT_FN_ATTRS128 +_mm_maskz_cvttsps_epi64(__mmask8 __U, __m128 __A) { + return ((__m128i)__builtin_ia32_vcvttps2qqs128_mask( + (__v4sf)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U)); +} +// 256 bit : float -> long + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_cvttsps_epi64(__m128 __A) { + return ((__m256i)__builtin_ia32_vcvttps2qqs256_round_mask( + (__v4sf)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION)); +} +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_mask_cvttsps_epi64(__m256i __W, __mmask8 __U, __m128 __A) { + return ((__m256i)__builtin_ia32_vcvttps2qqs256_round_mask( + (__v4sf)__A, (__v4di)__W, __U, _MM_FROUND_CUR_DIRECTION)); +} + +static __inline__ __m256i __DEFAULT_FN_ATTRS256 +_mm256_maskz_cvttsps_epi64(__mmask8 __U, __m128 __A) { + return ((__m256i)__builtin_ia32_vcvttps2qqs256_round_mask( + (__v4sf)__A, (__v4di)_mm256_setzero_si256(), __U, + _MM_FROUND_CUR_DIRECTION)); +} + +#define _mm256_cvtts_roundps_epi64(__A, __R) \ + ((__m256i)__builtin_ia32_vcvttps2qqs256_round_mask( \ + (__v4sf)(__m128)__A, (__v4di)_mm256_undefined_si256(), (__mmask8) - 1, \ + (int)__R)) + +#define _mm256_mask_cvtts_roundps_epi64(__W, __U, __A, __R) \ + ((__m256i)__builtin_ia32_vcvttps2qqs256_round_mask( \ + (__v4sf)(__m128)__A, (__v4di)__W, (__mmask8)__U, (int)__R)) + +#define 
_mm256_maskz_cvtts_roundps_epi64(__U, __A, __R) \
+  ((__m256i)__builtin_ia32_vcvttps2qqs256_round_mask( \
+      (__v4sf)(__m128)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U, \
+      (int)__R))
+
+// 128 bit : float -> ulong
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttsps_epu64(__m128 __A) {
+  return ((__m128i)__builtin_ia32_vcvttps2uqqs128_mask(
+      (__v4sf)__A, (__v2di)_mm_undefined_si128(), (__mmask8)-1));
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_cvttsps_epu64(__m128i __W, __mmask8 __U, __m128 __A) {
+  return ((__m128i)__builtin_ia32_vcvttps2uqqs128_mask(
+      (__v4sf)__A, (__v2di)(__m128i)__W, (__mmask8)__U));
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_cvttsps_epu64(__mmask8 __U, __m128 __A) {
+  return ((__m128i)__builtin_ia32_vcvttps2uqqs128_mask(
+      (__v4sf)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U));
+}
+// 256 bit : float -> ulong
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvttsps_epu64(__m128 __A) {
+  return ((__m256i)__builtin_ia32_vcvttps2uqqs256_round_mask(
+      (__v4sf)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1,
+      _MM_FROUND_CUR_DIRECTION));
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_cvttsps_epu64(__m256i __W, __mmask8 __U, __m128 __A) {
+  return ((__m256i)__builtin_ia32_vcvttps2uqqs256_round_mask(
+      (__v4sf)__A, (__v4di)__W, __U, _MM_FROUND_CUR_DIRECTION));
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvttsps_epu64(__mmask8 __U, __m128 __A) {
+  return ((__m256i)__builtin_ia32_vcvttps2uqqs256_round_mask(
+      (__v4sf)__A, (__v4di)_mm256_setzero_si256(), __U,
+      _MM_FROUND_CUR_DIRECTION));
+}
+
+#define _mm256_cvtts_roundps_epu64(__A, __R) \
+  ((__m256i)__builtin_ia32_vcvttps2uqqs256_round_mask( \
+      (__v4sf)(__m128)__A, (__v4di)_mm256_undefined_si256(), (__mmask8) - 1, \
+      (int)__R))
+
+#define _mm256_mask_cvtts_roundps_epu64(__W, __U, __A, __R) \
+  ((__m256i)__builtin_ia32_vcvttps2uqqs256_round_mask( \
+      (__v4sf)(__m128)__A, (__v4di)__W, (__mmask8)__U, (int)__R))
+
+#define _mm256_maskz_cvtts_roundps_epu64(__U, __A, __R) \
+  ((__m256i)__builtin_ia32_vcvttps2uqqs256_round_mask( \
+      (__v4sf)(__m128)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U, \
+      (int)__R))
+
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
+#endif // __AVX10_2SATCVTDSINTRIN_H
diff --git a/clang/lib/Headers/hlsl/hlsl_intrinsics.h b/clang/lib/Headers/hlsl/hlsl_intrinsics.h
index 7a1edd9..d08dcd3 100644
--- a/clang/lib/Headers/hlsl/hlsl_intrinsics.h
+++ b/clang/lib/Headers/hlsl/hlsl_intrinsics.h
@@ -1718,6 +1718,39 @@ _HLSL_BUILTIN_ALIAS(__builtin_elementwise_sqrt)
 float4 sqrt(float4);
 
 //===----------------------------------------------------------------------===//
+// step builtins
+//===----------------------------------------------------------------------===//
+
+/// \fn T step(T x, T y)
+/// \brief Returns 1 if the x parameter is greater than or equal to the y
+/// parameter; otherwise, 0.
+/// \param x [in] The first floating-point value to compare.
+/// \param y [in] The second floating-point value to compare.
+///
+/// Step is based on the following formula: (x >= y) ? 1 : 0
+
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_step)
+half step(half, half);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_step)
+half2 step(half2, half2);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_step)
+half3 step(half3, half3);
+_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_step)
+half4 step(half4, half4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_step)
+float step(float, float);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_step)
+float2 step(float2, float2);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_step)
+float3 step(float3, float3);
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_step)
+float4 step(float4, float4);
+
+//===----------------------------------------------------------------------===//
 // tan builtins
 //===----------------------------------------------------------------------===//
diff --git a/clang/lib/Headers/immintrin.h b/clang/lib/Headers/immintrin.h
index 30fcc02..280154f 100644
--- a/clang/lib/Headers/immintrin.h
+++ b/clang/lib/Headers/immintrin.h
@@ -653,6 +653,7 @@ _storebe_i64(void * __P, long long __D) {
 #include <avx10_2convertintrin.h>
 #include <avx10_2minmaxintrin.h>
 #include <avx10_2niintrin.h>
+#include <avx10_2satcvtdsintrin.h>
 #include <avx10_2satcvtintrin.h>
 #endif
@@ -661,6 +662,7 @@ _storebe_i64(void * __P, long long __D) {
 #include <avx10_2_512convertintrin.h>
 #include <avx10_2_512minmaxintrin.h>
 #include <avx10_2_512niintrin.h>
+#include <avx10_2_512satcvtdsintrin.h>
 #include <avx10_2_512satcvtintrin.h>
 #endif
diff --git a/clang/lib/Parse/ParseStmt.cpp b/clang/lib/Parse/ParseStmt.cpp
index bdb3fc0..9188799 100644
--- a/clang/lib/Parse/ParseStmt.cpp
+++ b/clang/lib/Parse/ParseStmt.cpp
@@ -228,7 +228,7 @@ Retry:
       return StmtError();
     }
 
-    // If the identifier was typo-corrected, try again.
+    // If the identifier was annotated, try again.
     if (Tok.isNot(tok::identifier))
       goto Retry;
   }
diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp
index 4e44813..527718c 100644
--- a/clang/lib/Sema/SemaHLSL.cpp
+++ b/clang/lib/Sema/SemaHLSL.cpp
@@ -1747,6 +1747,18 @@ bool SemaHLSL::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
     SetElementTypeAsReturnType(&SemaRef, TheCall, getASTContext().IntTy);
     break;
   }
+  case Builtin::BI__builtin_hlsl_step: {
+    if (SemaRef.checkArgCount(TheCall, 2))
+      return true;
+    if (CheckFloatOrHalfRepresentations(&SemaRef, TheCall))
+      return true;
+
+    ExprResult A = TheCall->getArg(0);
+    QualType ArgTyA = A.get()->getType();
+    // The return type is the same as the argument type.
+    TheCall->setType(ArgTyA);
+    break;
+  }
   // Note these are llvm builtins that we want to catch invalid intrinsic
   // generation. Normal handling of these builtins will occur elsewhere.
   case Builtin::BI__builtin_elementwise_bitreverse: {
diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp
index d21b8cb..4d11f2a 100644
--- a/clang/lib/Sema/SemaInit.cpp
+++ b/clang/lib/Sema/SemaInit.cpp
@@ -9548,7 +9548,7 @@ static void DiagnoseNarrowingInInitList(Sema &S,
                                         unsigned ConstRefDiagID,
                                         unsigned WarnDiagID) {
   unsigned DiagID;
   auto &L = S.getLangOpts();
-  if (L.CPlusPlus11 &&
+  if (L.CPlusPlus11 && !L.HLSL &&
       (!L.MicrosoftExt || L.isCompatibleWithMSVC(LangOptions::MSVC2015)))
     DiagID = IsConstRef ?
ConstRefDiagID : DefaultDiagID; else diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp index ea72d3f..a155bb2 100644 --- a/clang/lib/Sema/SemaOverload.cpp +++ b/clang/lib/Sema/SemaOverload.cpp @@ -2067,7 +2067,7 @@ static bool IsVectorConversion(Sema &S, QualType FromType, QualType ToType, // There are no conversions between extended vector types, only identity. if (auto *ToExtType = ToType->getAs<ExtVectorType>()) { - if (auto *FromExtType = FromType->getAs<ExtVectorType>()) { + if (FromType->getAs<ExtVectorType>()) { // There are no conversions between extended vector types other than the // identity conversion. return false; diff --git a/clang/lib/Sema/SemaX86.cpp b/clang/lib/Sema/SemaX86.cpp index 233a068..6a4d78f 100644 --- a/clang/lib/Sema/SemaX86.cpp +++ b/clang/lib/Sema/SemaX86.cpp @@ -46,6 +46,14 @@ bool SemaX86::CheckBuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { case X86::BI__builtin_ia32_vcvttsh2si64: case X86::BI__builtin_ia32_vcvttsh2usi32: case X86::BI__builtin_ia32_vcvttsh2usi64: + case X86::BI__builtin_ia32_vcvttsd2sis32: + case X86::BI__builtin_ia32_vcvttsd2usis32: + case X86::BI__builtin_ia32_vcvttss2sis32: + case X86::BI__builtin_ia32_vcvttss2usis32: + case X86::BI__builtin_ia32_vcvttsd2sis64: + case X86::BI__builtin_ia32_vcvttsd2usis64: + case X86::BI__builtin_ia32_vcvttss2sis64: + case X86::BI__builtin_ia32_vcvttss2usis64: ArgNum = 1; break; case X86::BI__builtin_ia32_maxpd512: @@ -435,6 +443,24 @@ bool SemaX86::CheckBuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { ArgNum = 4; HasRC = true; break; + case X86::BI__builtin_ia32_vcvttpd2dqs256_round_mask: + case X86::BI__builtin_ia32_vcvttpd2dqs512_round_mask: + case X86::BI__builtin_ia32_vcvttpd2udqs256_round_mask: + case X86::BI__builtin_ia32_vcvttpd2udqs512_round_mask: + case X86::BI__builtin_ia32_vcvttpd2qqs256_round_mask: + case X86::BI__builtin_ia32_vcvttpd2qqs512_round_mask: + case X86::BI__builtin_ia32_vcvttpd2uqqs256_round_mask: + case X86::BI__builtin_ia32_vcvttpd2uqqs512_round_mask: + case X86::BI__builtin_ia32_vcvttps2dqs256_round_mask: + case X86::BI__builtin_ia32_vcvttps2dqs512_round_mask: + case X86::BI__builtin_ia32_vcvttps2udqs256_round_mask: + case X86::BI__builtin_ia32_vcvttps2udqs512_round_mask: + case X86::BI__builtin_ia32_vcvttps2qqs256_round_mask: + case X86::BI__builtin_ia32_vcvttps2qqs512_round_mask: + case X86::BI__builtin_ia32_vcvttps2uqqs256_round_mask: + case X86::BI__builtin_ia32_vcvttps2uqqs512_round_mask: + ArgNum = 3; + break; } llvm::APSInt Result; diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h index 4bbc024..ff745b3 100644 --- a/clang/lib/Sema/TreeTransform.h +++ b/clang/lib/Sema/TreeTransform.h @@ -113,9 +113,13 @@ class TreeTransform { class ForgetPartiallySubstitutedPackRAII { Derived &Self; TemplateArgument Old; + // Set the pack expansion index to -1 to avoid pack substitution and + // indicate that parameter packs should be instantiated as themselves. 
+ Sema::ArgumentPackSubstitutionIndexRAII ResetPackSubstIndex; public: - ForgetPartiallySubstitutedPackRAII(Derived &Self) : Self(Self) { + ForgetPartiallySubstitutedPackRAII(Derived &Self) + : Self(Self), ResetPackSubstIndex(Self.getSema(), -1) { Old = Self.ForgetPartiallySubstitutedPack(); } diff --git a/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp index 2206137..8bb7880 100644 --- a/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp @@ -1129,7 +1129,7 @@ tryToInvalidateFReadBufferByElements(ProgramStateRef State, CheckerContext &C, if (!ElemTy.isNull() && CountVal && Size && StartIndexVal) { int64_t NumBytesRead = Size.value() * CountVal.value(); int64_t ElemSizeInChars = Ctx.getTypeSizeInChars(ElemTy).getQuantity(); - if (ElemSizeInChars == 0) + if (ElemSizeInChars == 0 || NumBytesRead < 0) return nullptr; bool IncompleteLastElement = (NumBytesRead % ElemSizeInChars) != 0; diff --git a/clang/test/AST/ByteCode/const-base-cast.cpp b/clang/test/AST/ByteCode/const-base-cast.cpp new file mode 100644 index 0000000..80226b9 --- /dev/null +++ b/clang/test/AST/ByteCode/const-base-cast.cpp @@ -0,0 +1,19 @@ +// RUN: %clang_cc1 -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple %itanium_abi_triple -emit-llvm %s -o - -fexperimental-new-constant-interpreter | FileCheck %s + + +/// Slightly adapted to the version from test/CodeGenCXX/. + +struct X { int x[12];}; +struct A : X { char x, y, z; }; +struct B { char y; }; +struct C : A,B {}; +unsigned char x = ((char*)(X*)(C*)0x1000) - (char*)0x1000; +// CHECK: @x = {{(dso_local )?}}global i8 0 + +unsigned char y = ((char*)(B*)(C*)0x1000) - (char*)0x1000; +// CHECK: @y = {{(dso_local )?}}global i8 51 + +unsigned char z = ((char*)(A*)(C*)0x1000) - (char*)0x1000; +// CHECK: @z = {{(dso_local )?}}global i8 0 + diff --git a/clang/test/AST/ByteCode/cxx11.cpp b/clang/test/AST/ByteCode/cxx11.cpp index 481e3da..86b5828 100644 --- a/clang/test/AST/ByteCode/cxx11.cpp +++ b/clang/test/AST/ByteCode/cxx11.cpp @@ -169,3 +169,8 @@ namespace FinalLtorDiags { A<q> c; // both-error {{non-type template argument of type 'int *' is not a constant expression}} \ // both-note {{read of non-constexpr variable 'q' is not allowed in a constant expression}} } + +void lambdas() { + int d; + int a9[1] = {[d = 0] = 1}; // both-error {{not an integral constant expression}} +} diff --git a/clang/test/AST/HLSL/vector-constructors.hlsl b/clang/test/AST/HLSL/vector-constructors.hlsl index 905f11d..9161ad1 100644 --- a/clang/test/AST/HLSL/vector-constructors.hlsl +++ b/clang/test/AST/HLSL/vector-constructors.hlsl @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-compute -x hlsl -ast-dump -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-compute -ast-dump -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple dxil-pc-shadermodel6.0-compute -std=hlsl202x -ast-dump -o - %s | FileCheck %s typedef float float2 __attribute__((ext_vector_type(2))); typedef float float3 __attribute__((ext_vector_type(3))); diff --git a/clang/test/Analysis/fread.c b/clang/test/Analysis/fread.c index 3f28642..5dc6c0c 100644 --- a/clang/test/Analysis/fread.c +++ b/clang/test/Analysis/fread.c @@ -443,3 +443,33 @@ void test_unaligned_start_read(void) { fclose(fp); } } + +void no_crash_if_count_is_negative(long l, long r, unsigned char *buffer) { + FILE *fp = fopen("path", "r"); + if (fp) { + if (l * r == -1) { + fread(buffer, 
1, l * r, fp); // no-crash + } + fclose(fp); + } +} + +void no_crash_if_size_is_negative(long l, long r, unsigned char *buffer) { + FILE *fp = fopen("path", "r"); + if (fp) { + if (l * r == -1) { + fread(buffer, l * r, 1, fp); // no-crash + } + fclose(fp); + } +} + +void no_crash_if_size_and_count_are_negative(long l, long r, unsigned char *buffer) { + FILE *fp = fopen("path", "r"); + if (fp) { + if (l * r == -1) { + fread(buffer, l * r, l * r, fp); // no-crash + } + fclose(fp); + } +} diff --git a/clang/test/CodeGen/X86/avx10_2_512satcvtds-builtins-errors.c b/clang/test/CodeGen/X86/avx10_2_512satcvtds-builtins-errors.c new file mode 100644 index 0000000..c2e8912 --- /dev/null +++ b/clang/test/CodeGen/X86/avx10_2_512satcvtds-builtins-errors.c @@ -0,0 +1,52 @@ +// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=i386-unknown-unknown -target-feature +avx10.2-512 -emit-llvm -Wall -Werror -verify + +#include <immintrin.h> +#include <stddef.h> + +__m256i test_mm512_cvtts_roundpd_epi32(__m512d A) { + return _mm512_cvtts_roundpd_epi32(A, 22); // expected-error {{invalid rounding argument}} +} + +__m256i test_mm512_mask_cvtts_roundpd_epi32(__m256i W, __mmask8 U, __m512d A) { + return _mm512_mask_cvtts_roundpd_epi32(W, U, A, 22); // expected-error {{invalid rounding argument}} +} + +__m256i test_mm512_maskz_cvtts_roundpd_epi32(__mmask8 U, __m512d A) { + return _mm512_maskz_cvtts_roundpd_epi32(U, A, 22); // expected-error {{invalid rounding argument}} +} + +__m256i test_mm512_cvtts_roundpd_epu32(__m512d A) { + return _mm512_cvtts_roundpd_epu32(A, 22); // expected-error {{invalid rounding argument}} +} + +__m256i test_mm512_mask_cvtts_roundpd_epu32(__m256i W, __mmask8 U, __m512d A) { + return _mm512_mask_cvtts_roundpd_epu32(W, U, A, 22); // expected-error {{invalid rounding argument}} +} + +__m256i test_mm512_maskz_cvtts_roundpd_epu32(__mmask8 U, __m512d A) { + return _mm512_maskz_cvtts_roundpd_epu32(U, A, 22); // expected-error {{invalid rounding argument}} +} + +__m512i test_mm512_cvtts_roundps_epi32(__m512 A) { + return _mm512_cvtts_roundps_epi32(A, 22); // expected-error {{invalid rounding argument}} +} + +__m512i test_mm512_mask_cvtts_roundps_epi32(__m512i W, __mmask8 U, __m512 A) { + return _mm512_mask_cvtts_roundps_epi32(W, U, A, 22); // expected-error {{invalid rounding argument}} +} + +__m512i test_mm512_maskz_cvtts_roundps_epi32(__mmask8 U, __m512 A) { + return _mm512_maskz_cvtts_roundps_epi32(U, A, 22); // expected-error {{invalid rounding argument}} +} + +__m512i test_mm512_cvtts_roundps_epu32(__m512 A) { + return _mm512_cvtts_roundps_epu32(A, 22); // expected-error {{invalid rounding argument}} +} + +__m512i test_mm512_mask_cvtts_roundps_epu32(__m512i W, __mmask8 U, __m512 A) { + return _mm512_mask_cvtts_roundps_epu32(W, U, A, 22); // expected-error {{invalid rounding argument}} +} + +__m512i test_mm512_maskz_cvtts_roundps_epu32(__mmask8 U, __m512 A) { + return _mm512_maskz_cvtts_roundps_epu32(U, A, 22); // expected-error {{invalid rounding argument}} +}
\ No newline at end of file diff --git a/clang/test/CodeGen/X86/avx10_2_512satcvtds-builtins-x64-error.c b/clang/test/CodeGen/X86/avx10_2_512satcvtds-builtins-x64-error.c new file mode 100755 index 0000000..2900256 --- /dev/null +++ b/clang/test/CodeGen/X86/avx10_2_512satcvtds-builtins-x64-error.c @@ -0,0 +1,76 @@ +// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-unknown -target-feature +avx10.2-512 -emit-llvm -Wall -Werror -verify + +#include <immintrin.h> +#include <stddef.h> + +long long test_mm_cvttssd_si64(__m128d __A) { + return _mm_cvtts_roundsd_si64(__A, 22); // expected-error {{invalid rounding argument}} +} + +long long test_mm_cvttssd_i64(__m128d __A) { + return _mm_cvtts_roundsd_i64(__A, 22); // expected-error {{invalid rounding argument}} +} + +unsigned long long test_mm_cvttssd_u64(__m128d __A) { + return _mm_cvtts_roundsd_u64(__A, 22); // expected-error {{invalid rounding argument}} +} + +float test_mm_cvttsss_i64(__m128 __A) { + return _mm_cvtts_roundss_i64(__A, 22); // expected-error {{invalid rounding argument}} +} + +long long test_mm_cvttsss_si64(__m128 __A) { + return _mm_cvtts_roundss_si64(__A, 22); // expected-error {{invalid rounding argument}} +} + +unsigned long long test_mm_cvttsss_u64(__m128 __A) { + return _mm_cvtts_roundss_u64(__A, 22); // expected-error {{invalid rounding argument}} +} + +__m512i test_mm512_cvtts_roundpd_epi64(__m512d A) { + return _mm512_cvtts_roundpd_epi64( A, 22); // expected-error {{invalid rounding argument}} +} + +__m512i test_mm512_mask_cvtts_roundpd_epi64(__m512i W, __mmask8 U, __m512d A) { + return _mm512_mask_cvtts_roundpd_epi64( W, U, A, 22); // expected-error {{invalid rounding argument}} +} + +__m512i test_mm512_maskz_cvtts_roundpd_epi64(__mmask8 U, __m512d A) { + return _mm512_maskz_cvtts_roundpd_epi64( U, A, 22); // expected-error {{invalid rounding argument}} +} + +__m512i test_mm512_cvtts_roundpd_epu64(__m512d A) { + return _mm512_cvtts_roundpd_epu64( A, 22); // expected-error {{invalid rounding argument}} +} + +__m512i test_mm512_mask_cvtts_roundpd_epu64(__m512i W, __mmask8 U, __m512d A) { + return _mm512_mask_cvtts_roundpd_epu64( W, U, A, 22); // expected-error {{invalid rounding argument}} +} + +__m512i test_mm512_maskz_cvtts_roundpd_epu64(__mmask8 U, __m512d A) { + return _mm512_maskz_cvtts_roundpd_epu64( U, A, 22); // expected-error {{invalid rounding argument}} +} + +__m512i test_mm512_cvtts_roundps_epi64(__m256 A) { + return _mm512_cvtts_roundps_epi64( A, 22); // expected-error {{invalid rounding argument}} +} + +__m512i test_mm512_mask_cvtts_roundps_epi64(__m512i W, __mmask8 U, __m256 A) { + return _mm512_mask_cvtts_roundps_epi64( W, U, A, 22); // expected-error {{invalid rounding argument}} +} + +__m512i test_mm512_maskz_cvtts_roundps_epi64(__mmask8 U, __m256 A) { + return _mm512_maskz_cvtts_roundps_epi64( U, A, 22); // expected-error {{invalid rounding argument}} +} + +__m512i test_mm512_cvtts_roundps_epu64(__m256 A) { + return _mm512_cvtts_roundps_epu64( A, 22); // expected-error {{invalid rounding argument}} +} + +__m512i test_mm512_mask_cvtts_roundps_epu64(__m512i W, __mmask8 U, __m256 A) { + return _mm512_mask_cvtts_roundps_epu64( W, U, A, 22); // expected-error {{invalid rounding argument}} +} + +__m512i test_mm512_maskz_cvtts_roundps_epu64(__mmask8 U, __m256 A) { + return _mm512_maskz_cvtts_roundps_epu64( U, A, 22); // expected-error {{invalid rounding argument}} +} diff --git a/clang/test/CodeGen/X86/avx10_2_512satcvtds-builtins-x64.c 
b/clang/test/CodeGen/X86/avx10_2_512satcvtds-builtins-x64.c new file mode 100644 index 0000000..8c8959a --- /dev/null +++ b/clang/test/CodeGen/X86/avx10_2_512satcvtds-builtins-x64.c @@ -0,0 +1,184 @@ +// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-unknown -target-feature +avx10.2-512 -emit-llvm -o - | FileCheck %s + +#include <immintrin.h> +#include <stddef.h> + +long long test_mm_cvttssd_si64(__m128d __A) { + // CHECK-LABEL: @test_mm_cvttssd_si64( + // CHECK: @llvm.x86.avx10.vcvttsd2sis64(<2 x double> + return _mm_cvtts_roundsd_si64(__A, _MM_FROUND_NO_EXC); +} + +long long test_mm_cvttssd_i64(__m128d __A) { + // CHECK-LABEL: @test_mm_cvttssd_i64( + // CHECK: @llvm.x86.avx10.vcvttsd2sis64(<2 x double> + return _mm_cvtts_roundsd_i64(__A, _MM_FROUND_NO_EXC); +} + +unsigned long long test_mm_cvttssd_u64(__m128d __A) { + // CHECK-LABEL: @test_mm_cvttssd_u64( + // CHECK: @llvm.x86.avx10.vcvttsd2usis64(<2 x double> + return _mm_cvtts_roundsd_u64(__A, _MM_FROUND_NO_EXC); +} + +float test_mm_cvttsss_i64(__m128 __A) { + // CHECK-LABEL: @test_mm_cvttsss_i64( + // CHECK: @llvm.x86.avx10.vcvttss2sis64(<4 x float> + return _mm_cvtts_roundss_i64(__A, _MM_FROUND_NO_EXC); +} + +long long test_mm_cvttsss_si64(__m128 __A) { + // CHECK-LABEL: @test_mm_cvttsss_si64( + // CHECK: @llvm.x86.avx10.vcvttss2sis64(<4 x float> + return _mm_cvtts_roundss_si64(__A, _MM_FROUND_NO_EXC); +} + +unsigned long long test_mm_cvttsss_u64(__m128 __A) { + // CHECK-LABEL: @test_mm_cvttsss_u64( + // CHECK: @llvm.x86.avx10.vcvttss2usis64(<4 x float> + return _mm_cvtts_roundss_u64(__A, _MM_FROUND_NO_EXC); +} + +__m512i test_mm512_cvttspd_epi64(__m512d A) { + // CHECK-LABEL: test_mm512_cvttspd_epi64 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2qqs.round.512(<8 x double> + return _mm512_cvttspd_epi64(A); +} + +__m512i test_mm512_mask_cvttspd_epi64(__m512i W, __mmask8 U, __m512d A) { + // CHECK-LABEL: test_mm512_mask_cvttspd_epi64 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2qqs.round.512(<8 x double> + return _mm512_mask_cvttspd_epi64(W, U, A); +} + +__m512i test_mm512_maskz_cvttspd_epi64(__mmask8 U, __m512d A) { + // CHECK-LABEL: test_mm512_maskz_cvttspd_epi64 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2qqs.round.512(<8 x double> + return _mm512_maskz_cvttspd_epi64(U, A); +} + +__m512i test_mm512_cvtts_roundpd_epi64(__m512d A) { + // CHECK-LABEL: test_mm512_cvtts_roundpd_epi64 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2qqs.round.512(<8 x double> + return _mm512_cvtts_roundpd_epi64(A, _MM_FROUND_NO_EXC); +} + +__m512i test_mm512_mask_cvtts_roundpd_epi64(__m512i W, __mmask8 U, __m512d A) { + // CHECK-LABEL: test_mm512_mask_cvtts_roundpd_epi64 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2qqs.round.512(<8 x double> + return _mm512_mask_cvtts_roundpd_epi64(W, U, A, _MM_FROUND_NO_EXC); +} + +__m512i test_mm512_maskz_cvtts_roundpd_epi64(__mmask8 U, __m512d A) { + // CHECK-LABEL: test_mm512_maskz_cvtts_roundpd_epi64 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2qqs.round.512(<8 x double> + return _mm512_maskz_cvtts_roundpd_epi64(U, A, _MM_FROUND_NO_EXC); +} + +__m512i test_mm512_cvttspd_epu64(__m512d A) { + // CHECK-LABEL: test_mm512_cvttspd_epu64 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2uqqs.round.512(<8 x double> + return _mm512_cvttspd_epu64(A); +} + +__m512i test_mm512_mask_cvttspd_epu64(__m512i W, __mmask8 U, __m512d A) { + // CHECK-LABEL: test_mm512_mask_cvttspd_epu64 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2uqqs.round.512(<8 x double> + return _mm512_mask_cvttspd_epu64(W, U, A); +} + +__m512i 
test_mm512_maskz_cvttspd_epu64(__mmask8 U, __m512d A) { + // CHECK-LABEL: test_mm512_maskz_cvttspd_epu64 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2uqqs.round.512(<8 x double> + return _mm512_maskz_cvttspd_epu64(U, A); +} + +__m512i test_mm512_cvtts_roundpd_epu64(__m512d A) { + // CHECK-LABEL: test_mm512_cvtts_roundpd_epu64 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2uqqs.round.512(<8 x double> + return _mm512_cvtts_roundpd_epu64(A, _MM_FROUND_NO_EXC); +} + +__m512i test_mm512_mask_cvtts_roundpd_epu64(__m512i W, __mmask8 U, __m512d A) { + // CHECK-LABEL: test_mm512_mask_cvtts_roundpd_epu64 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2uqqs.round.512(<8 x double> + return _mm512_mask_cvtts_roundpd_epu64(W, U, A, _MM_FROUND_NO_EXC); +} + +__m512i test_mm512_maskz_cvtts_roundpd_epu64(__mmask8 U, __m512d A) { + // CHECK-LABEL: test_mm512_maskz_cvtts_roundpd_epu64 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2uqqs.round.512(<8 x double> + return _mm512_maskz_cvtts_roundpd_epu64(U, A, _MM_FROUND_NO_EXC); +} + +__m512i test_mm512_cvttsps_epi64(__m256 A) { + // CHECK-LABEL: test_mm512_cvttsps_epi64 + // CHECK: @llvm.x86.avx10.mask.vcvttps2qqs.round.512(<8 x float> + return _mm512_cvttsps_epi64(A); +} + +__m512i test_mm512_mask_cvttsps_epi64(__m512i W, __mmask8 U, __m256 A) { + // CHECK-LABEL: test_mm512_mask_cvttsps_epi64 + // CHECK: @llvm.x86.avx10.mask.vcvttps2qqs.round.512(<8 x float> + return _mm512_mask_cvttsps_epi64(W, U, A); +} + +__m512i test_mm512_maskz_cvttsps_epi64(__mmask8 U, __m256 A) { + // CHECK-LABEL: test_mm512_maskz_cvttsps_epi64 + // CHECK: @llvm.x86.avx10.mask.vcvttps2qqs.round.512(<8 x float> + return _mm512_maskz_cvttsps_epi64(U, A); +} + +__m512i test_mm512_cvtts_roundps_epi64(__m256 A) { + // CHECK-LABEL: test_mm512_cvtts_roundps_epi64 + // CHECK: @llvm.x86.avx10.mask.vcvttps2qqs.round.512(<8 x float> + return _mm512_cvtts_roundps_epi64(A, _MM_FROUND_NO_EXC); +} + +__m512i test_mm512_mask_cvtts_roundps_epi64(__m512i W, __mmask8 U, __m256 A) { + // CHECK-LABEL: test_mm512_mask_cvtts_roundps_epi64 + // CHECK: @llvm.x86.avx10.mask.vcvttps2qqs.round.512(<8 x float> + return _mm512_mask_cvtts_roundps_epi64(W, U, A, _MM_FROUND_NO_EXC); +} + +__m512i test_mm512_maskz_cvtts_roundps_epi64(__mmask8 U, __m256 A) { + // CHECK-LABEL: test_mm512_maskz_cvtts_roundps_epi64 + // CHECK: @llvm.x86.avx10.mask.vcvttps2qqs.round.512(<8 x float> + return _mm512_maskz_cvtts_roundps_epi64(U, A, _MM_FROUND_NO_EXC); +} + +__m512i test_mm512_cvttsps_epu64(__m256 A) { + // CHECK-LABEL: test_mm512_cvttsps_epu64 + // CHECK: @llvm.x86.avx10.mask.vcvttps2uqqs.round.512(<8 x float> + return _mm512_cvttsps_epu64(A); +} + +__m512i test_mm512_mask_cvttsps_epu64(__m512i W, __mmask8 U, __m256 A) { + // CHECK-LABEL: test_mm512_mask_cvttsps_epu64 + // CHECK: @llvm.x86.avx10.mask.vcvttps2uqqs.round.512(<8 x float> + return _mm512_mask_cvttsps_epu64(W, U, A); +} + +__m512i test_mm512_maskz_cvttsps_epu64(__mmask8 U, __m256 A) { + // CHECK-LABEL: test_mm512_maskz_cvttsps_epu64 + // CHECK: @llvm.x86.avx10.mask.vcvttps2uqqs.round.512(<8 x float> + return _mm512_maskz_cvttsps_epu64(U, A); +} + +__m512i test_mm512_cvtts_roundps_epu64(__m256 A) { + // CHECK-LABEL: test_mm512_cvtts_roundps_epu64 + // CHECK: @llvm.x86.avx10.mask.vcvttps2uqqs.round.512(<8 x float> + return _mm512_cvtts_roundps_epu64(A, _MM_FROUND_NO_EXC); +} + +__m512i test_mm512_mask_cvtts_roundps_epu64(__m512i W, __mmask8 U, __m256 A) { + // CHECK-LABEL: test_mm512_mask_cvtts_roundps_epu64 + // CHECK: @llvm.x86.avx10.mask.vcvttps2uqqs.round.512(<8 x float> + return 
_mm512_mask_cvtts_roundps_epu64(W, U, A, _MM_FROUND_NO_EXC); +} + +__m512i test_mm512_maskz_cvtts_roundps_epu64(__mmask8 U, __m256 A) { + // CHECK-LABEL: test_mm512_maskz_cvtts_roundps_epu64 + // CHECK: @llvm.x86.avx10.mask.vcvttps2uqqs.round.512(<8 x float> + return _mm512_maskz_cvtts_roundps_epu64(U, A, _MM_FROUND_NO_EXC); +} diff --git a/clang/test/CodeGen/X86/avx10_2_512satcvtds-builtins.c b/clang/test/CodeGen/X86/avx10_2_512satcvtds-builtins.c new file mode 100644 index 0000000..cccee04 --- /dev/null +++ b/clang/test/CodeGen/X86/avx10_2_512satcvtds-builtins.c @@ -0,0 +1,151 @@ +// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=i386 -target-feature +avx10.2-512 -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK,X86 +// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64 -target-feature +avx10.2-512 -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK,X64 + +#include <immintrin.h> +#include <stddef.h> + +__m256i test_mm512_cvttspd_epi32(__m512d A) { + // CHECK-LABEL: test_mm512_cvttspd_epi32 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2dqs.round.512(<8 x double> + return _mm512_cvttspd_epi32(A); +} + +__m256i test_mm512_mask_cvttspd_epi32(__m256i W, __mmask8 U, __m512d A) { + // CHECK-LABEL: test_mm512_mask_cvttspd_epi32 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2dqs.round.512(<8 x double> + return _mm512_mask_cvttspd_epi32(W, U, A); +} + +__m256i test_mm512_maskz_cvttspd_epi32(__mmask8 U, __m512d A) { + // CHECK-LABEL: test_mm512_maskz_cvttspd_epi32 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2dqs.round.512(<8 x double> + return _mm512_maskz_cvttspd_epi32(U, A); +} + +__m256i test_mm512_cvtts_roundpd_epi32(__m512d A) { + // CHECK-LABEL: test_mm512_cvtts_roundpd_epi32 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2dqs.round.512(<8 x double> + return _mm512_cvtts_roundpd_epi32(A, _MM_FROUND_NO_EXC); +} + +__m256i test_mm512_mask_cvtts_roundpd_epi32(__m256i W, __mmask8 U, __m512d A) { + // CHECK-LABEL: test_mm512_mask_cvtts_roundpd_epi32 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2dqs.round.512(<8 x double> + return _mm512_mask_cvtts_roundpd_epi32(W, U, A, _MM_FROUND_NO_EXC); +} + +__m256i test_mm512_maskz_cvtts_roundpd_epi32(__mmask8 U, __m512d A) { + // CHECK-LABEL: test_mm512_maskz_cvtts_roundpd_epi32 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2dqs.round.512(<8 x double> + return _mm512_maskz_cvtts_roundpd_epi32(U, A, _MM_FROUND_NO_EXC); +} + +__m256i test_mm512_cvttspd_epu32(__m512d A) { + // CHECK-LABEL: test_mm512_cvttspd_epu32 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2udqs.round.512(<8 x double> + return _mm512_cvttspd_epu32(A); +} + +__m256i test_mm512_mask_cvttspd_epu32(__m256i W, __mmask8 U, __m512d A) { + // CHECK-LABEL: test_mm512_mask_cvttspd_epu32 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2udqs.round.512(<8 x double> + return _mm512_mask_cvttspd_epu32(W, U, A); +} + +__m256i test_mm512_maskz_cvttspd_epu32(__mmask8 U, __m512d A) { + // CHECK-LABEL: test_mm512_maskz_cvttspd_epu32 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2udqs.round.512(<8 x double> + return _mm512_maskz_cvttspd_epu32(U, A); +} + +__m256i test_mm512_cvtts_roundpd_epu32(__m512d A) { + // CHECK-LABEL: test_mm512_cvtts_roundpd_epu32 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2udqs.round.512(<8 x double> + return _mm512_cvtts_roundpd_epu32(A, _MM_FROUND_NO_EXC); +} + +__m256i test_mm512_mask_cvtts_roundpd_epu32(__m256i W, __mmask8 U, __m512d A) { + // CHECK-LABEL: test_mm512_mask_cvtts_roundpd_epu32 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2udqs.round.512(<8 x double> + return 
_mm512_mask_cvtts_roundpd_epu32(W, U, A, _MM_FROUND_NO_EXC); +} + +__m256i test_mm512_maskz_cvtts_roundpd_epu32(__mmask8 U, __m512d A) { + // CHECK-LABEL: test_mm512_maskz_cvtts_roundpd_epu32 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2udqs.round.512(<8 x double> + return _mm512_maskz_cvtts_roundpd_epu32(U, A, _MM_FROUND_NO_EXC); +} + +__m512i test_mm512_cvttsps_epi32(__m512 A) { + // CHECK-LABEL: test_mm512_cvttsps_epi32 + // CHECK: @llvm.x86.avx10.mask.vcvttps2dqs.round.512(<16 x float> + return _mm512_cvttsps_epi32(A); +} + +__m512i test_mm512_mask_cvttsps_epi32(__m512i W, __mmask8 U, __m512 A) { + // CHECK-LABEL: test_mm512_mask_cvttsps_epi32 + // CHECK: @llvm.x86.avx10.mask.vcvttps2dqs.round.512(<16 x float> + return _mm512_mask_cvttsps_epi32(W, U, A); +} + +__m512i test_mm512_maskz_cvttsps_epi32(__mmask8 U, __m512 A) { + // CHECK-LABEL: test_mm512_maskz_cvttsps_epi32 + // CHECK: @llvm.x86.avx10.mask.vcvttps2dqs.round.512(<16 x float> + return _mm512_maskz_cvttsps_epi32(U, A); +} + +__m512i test_mm512_cvtts_roundps_epi32(__m512 A) { + // CHECK-LABEL: test_mm512_cvtts_roundps_epi32 + // CHECK: @llvm.x86.avx10.mask.vcvttps2dqs.round.512(<16 x float> + return _mm512_cvtts_roundps_epi32(A, _MM_FROUND_NO_EXC); +} + +__m512i test_mm512_mask_cvtts_roundps_epi32(__m512i W, __mmask8 U, __m512 A) { + // CHECK-LABEL: test_mm512_mask_cvtts_roundps_epi32 + // CHECK: @llvm.x86.avx10.mask.vcvttps2dqs.round.512(<16 x float> + return _mm512_mask_cvtts_roundps_epi32(W, U, A, _MM_FROUND_NO_EXC); +} + +__m512i test_mm512_maskz_cvtts_roundps_epi32(__mmask8 U, __m512 A) { + // CHECK-LABEL: test_mm512_maskz_cvtts_roundps_epi32 + // CHECK: @llvm.x86.avx10.mask.vcvttps2dqs.round.512(<16 x float> + return _mm512_maskz_cvtts_roundps_epi32(U, A, _MM_FROUND_NO_EXC); +} + +__m512i test_mm512_cvttsps_epu32(__m512 A) { + // CHECK-LABEL: test_mm512_cvttsps_epu32 + // CHECK: @llvm.x86.avx10.mask.vcvttps2udqs.round.512(<16 x float> + return _mm512_cvttsps_epu32(A); +} + +__m512i test_mm512_mask_cvttsps_epu32(__m512i W, __mmask8 U, __m512 A) { + // CHECK-LABEL: test_mm512_mask_cvttsps_epu32 + // CHECK: @llvm.x86.avx10.mask.vcvttps2udqs.round.512(<16 x float> + return _mm512_mask_cvttsps_epu32(W, U, A); +} + +__m512i test_mm512_maskz_cvttsps_epu32(__mmask8 U, __m512 A) { + // CHECK-LABEL: test_mm512_maskz_cvttsps_epu32 + // CHECK: @llvm.x86.avx10.mask.vcvttps2udqs.round.512(<16 x float> + return _mm512_maskz_cvttsps_epu32(U, A); +} + +__m512i test_mm512_cvtts_roundps_epu32(__m512 A) { + // CHECK-LABEL: test_mm512_cvtts_roundps_epu32 + // CHECK: @llvm.x86.avx10.mask.vcvttps2udqs.round.512(<16 x float> + return _mm512_cvtts_roundps_epu32(A, _MM_FROUND_NO_EXC); +} + +__m512i test_mm512_mask_cvtts_roundps_epu32(__m512i W, __mmask8 U, __m512 A) { + // CHECK-LABEL: test_mm512_mask_cvtts_roundps_epu32 + // CHECK: @llvm.x86.avx10.mask.vcvttps2udqs.round.512(<16 x float> + return _mm512_mask_cvtts_roundps_epu32(W, U, A, _MM_FROUND_NO_EXC); +} +__m512i test_mm512_maskz_cvtts_roundps_epu32(__mmask8 U, __m512 A) { + // CHECK-LABEL: test_mm512_maskz_cvtts_roundps_epu32 + // CHECK: @llvm.x86.avx10.mask.vcvttps2udqs.round.512(<16 x float> + return _mm512_maskz_cvtts_roundps_epu32(U, A, _MM_FROUND_NO_EXC); +} + +// X64: {{.*}} +// X86: {{.*}}
\ No newline at end of file diff --git a/clang/test/CodeGen/X86/avx10_2satcvtds-builtins-errors.c b/clang/test/CodeGen/X86/avx10_2satcvtds-builtins-errors.c new file mode 100644 index 0000000..72d2769 --- /dev/null +++ b/clang/test/CodeGen/X86/avx10_2satcvtds-builtins-errors.c @@ -0,0 +1,57 @@ +// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=i386-unknown-unknown -target-feature +avx10.2-256 -emit-llvm -Wall -Werror -verify + +unsigned long long test_mm_cvttssd(unsigned long long __A) { + return _mm_cvttssd(__A); // expected-error {{call to undeclared function '_mm_cvttssd'}} +} + +unsigned long long test_mm_cvttsss(unsigned long long __A) { + return _mm_cvttsss(__A); // expected-error {{call to undeclared function '_mm_cvttsss'}} +} + +#include <immintrin.h> +#include <stddef.h> + +__m128i test_mm256_cvtts_roundpd_epi32(__m256d A) { + return _mm256_cvtts_roundpd_epi32(A, 22); // expected-error {{invalid rounding argument}} +} +__m128i test_mm256_mask_cvtts_roundpd_epi32(__m128i W, __mmask8 U, __m256d A) { + return _mm256_mask_cvtts_roundpd_epi32(W, U, A, 22); // expected-error {{invalid rounding argument}} +} + +__m128i test_mm256_maskz_cvtts_roundpd_epi32(__mmask8 U, __m256d A) { + return _mm256_maskz_cvtts_roundpd_epi32(U, A, 22); // expected-error {{invalid rounding argument}} +} + +__m128i test_mm256_cvtts_roundpd_epu32(__m256d A) { + return _mm256_cvtts_roundpd_epu32(A, 22); // expected-error {{invalid rounding argument}} +} +__m128i test_mm256_mask_cvtts_roundpd_epu32(__m128i W, __mmask8 U, __m256d A) { + return _mm256_mask_cvtts_roundpd_epu32(W, U, A, 22); // expected-error {{invalid rounding argument}} +} + +__m128i test_mm256_maskz_cvtts_roundpd_epu32(__mmask8 U, __m256d A) { + return _mm256_maskz_cvtts_roundpd_epu32(U, A, 22); // expected-error {{invalid rounding argument}} +} + +__m256i test_mm256_cvtts_roundps_epi32(__m256 A) { + return _mm256_cvtts_roundps_epi32(A, 22); // expected-error {{invalid rounding argument}} +} +__m256i test_mm256_mask_cvtts_roundps_epi32(__m256i W, __mmask8 U, __m256 A) { + return _mm256_mask_cvtts_roundps_epi32(W, U, A, 22); // expected-error {{invalid rounding argument}} +} + +__m256i test_mm256_maskz_cvtts_roundps_epi32(__mmask8 U, __m256 A) { + return _mm256_maskz_cvtts_roundps_epi32(U, A, 22); // expected-error {{invalid rounding argument}} +} + +__m256i test_mm256_cvtts_roundps_epu32(__m256 A) { + return _mm256_cvtts_roundps_epu32(A, 22); // expected-error {{invalid rounding argument}} +} + +__m256i test_mm256_mask_cvtts_roundps_epu32(__m256i W, __mmask8 U, __m256 A) { + return _mm256_mask_cvtts_roundps_epu32(W, U, A, 22); // expected-error {{invalid rounding argument}} +} + +__m256i test_mm256_maskz_cvtts_roundps_epu32(__mmask8 U, __m256 A) { + return _mm256_maskz_cvtts_roundps_epu32(U, A, 22); // expected-error {{invalid rounding argument}} +} diff --git a/clang/test/CodeGen/X86/avx10_2satcvtds-builtins-x64.c b/clang/test/CodeGen/X86/avx10_2satcvtds-builtins-x64.c new file mode 100644 index 0000000..0038473 --- /dev/null +++ b/clang/test/CodeGen/X86/avx10_2satcvtds-builtins-x64.c @@ -0,0 +1,262 @@ +// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64-unknown-unknown -target-feature +avx10.2-256 -emit-llvm -o - | FileCheck %s + +#include <immintrin.h> +#include <stddef.h> + +// scalar + +int test_mm_cvttssd_i32(__m128d __A) { + // CHECK-LABEL: @test_mm_cvttssd_i32 + // CHECK: @llvm.x86.avx10.vcvttsd2sis + return _mm_cvtts_roundsd_i32(__A, _MM_FROUND_NO_EXC); +} + +int 
test_mm_cvttssd_si32(__m128d __A) { + // CHECK-LABEL: @test_mm_cvttssd_si32( + // CHECK: @llvm.x86.avx10.vcvttsd2sis(<2 x double> + return _mm_cvtts_roundsd_si32(__A, _MM_FROUND_NO_EXC); +} + +unsigned test_mm_cvttssd_u32(__m128d __A) { + // CHECK-LABEL: @test_mm_cvttssd_u32( + // CHECK: @llvm.x86.avx10.vcvttsd2usis(<2 x double> + return _mm_cvtts_roundsd_u32(__A, _MM_FROUND_NO_EXC); +} + +int test_mm_cvttsss_i32(__m128 __A) { + // CHECK-LABEL: @test_mm_cvttsss_i32( + // CHECK: @llvm.x86.avx10.vcvttss2sis(<4 x float> + return _mm_cvtts_roundss_i32(__A, _MM_FROUND_NO_EXC); +} + +int test_mm_cvttsss_si32(__m128 __A) { + // CHECK-LABEL: @test_mm_cvttsss_si32( + // CHECK: @llvm.x86.avx10.vcvttss2sis(<4 x float> + return _mm_cvtts_roundss_si32(__A, _MM_FROUND_NO_EXC); +} + +unsigned test_mm_cvttsss_u32(__m128 __A) { + // CHECK-LABEL: @test_mm_cvttsss_u32( + // CHECK: @llvm.x86.avx10.vcvttss2usis(<4 x float> + return _mm_cvtts_roundss_u32(__A, _MM_FROUND_NO_EXC); +} + +// vector +// 128 bit +__m128i test_mm_cvttspd_epi64(__m128d A){ + // CHECK-LABEL: @test_mm_cvttspd_epi64 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2qqs.128(<2 x double> + return _mm_cvttspd_epi64(A); +} + +__m128i test_mm_mask_cvttspd_epi64(__m128i W, __mmask8 U, __m128d A){ + // CHECK-LABEL: @test_mm_mask_cvttspd_epi64 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2qqs.128(<2 x double> + return _mm_mask_cvttspd_epi64(W, U, A); +} + +__m128i test_mm_maskz_cvttspd_epi64(__mmask8 U,__m128d A){ + // CHECK-LABEL: @test_mm_maskz_cvttspd_epi64 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2qqs.128(<2 x double> + return _mm_maskz_cvttspd_epi64(U, A); +} + +__m128i test_mm_cvttspd_epu64(__m128d A){ + // CHECK-LABEL: @test_mm_cvttspd_epu64 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2uqqs.128(<2 x double> + return _mm_cvttspd_epu64(A); +} + +__m128i test_mm_mask_cvttspd_epu64(__m128i W, __mmask8 U, __m128d A){ + // CHECK-LABEL: @test_mm_mask_cvttspd_epu64 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2uqqs.128(<2 x double> + return _mm_mask_cvttspd_epu64(W, U, A); +} + +__m128i test_mm_maskz_cvttspd_epu64(__mmask8 U,__m128d A){ + // CHECK-LABEL: @test_mm_maskz_cvttspd_epu64 + // CHECK: @llvm.x86.avx10.mask.vcvttpd2uqqs.128(<2 x double> + return _mm_maskz_cvttspd_epu64(U, A); +} + +// 256 bit +__m256i test_mm256_cvttspd_epi64(__m256d A){ +// CHECK-LABEL: @test_mm256_cvttspd_epi64 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2qqs.round.256(<4 x double> + return _mm256_cvttspd_epi64(A); +} + +__m256i test_mm256_mask_cvttspd_epi64(__m256i W,__mmask8 U, __m256d A){ +// CHECK-LABEL: @test_mm256_mask_cvttspd_epi64 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2qqs.round.256(<4 x double> + return _mm256_mask_cvttspd_epi64(W,U, A); +} + +__m256i test_mm256_maskz_cvttspd_epi64(__mmask8 U, __m256d A){ +// CHECK-LABEL: @test_mm256_maskz_cvttspd_epi64 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2qqs.round.256(<4 x double> + return _mm256_maskz_cvttspd_epi64(U, A); +} + +__m256i test_mm256_cvtts_roundpd_epi64(__m256d A){ +// CHECK-LABEL: @test_mm256_cvtts_roundpd_epi64 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2qqs.round.256(<4 x double> + return _mm256_cvtts_roundpd_epi64(A,_MM_FROUND_NEARBYINT ); +} + +__m256i test_mm256_mask_cvtts_roundpd_epi64(__m256i W,__mmask8 U, __m256d A){ +// CHECK-LABEL: @test_mm256_mask_cvtts_roundpd_epi64 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2qqs.round.256(<4 x double> + return _mm256_mask_cvtts_roundpd_epi64(W,U,A,_MM_FROUND_NEARBYINT ); +} + +__m256i test_mm256_maskz_cvtts_roundpd_epi64(__mmask8 U, __m256d A){ +// CHECK-LABEL: @test_mm256_maskz_cvtts_roundpd_epi64 
+// CHECK: @llvm.x86.avx10.mask.vcvttpd2qqs.round.256(<4 x double> + return _mm256_maskz_cvtts_roundpd_epi64(U,A,_MM_FROUND_NEARBYINT ); +} + +__m256i test_mm256_cvttspd_epu64(__m256d A){ +// CHECK-LABEL: @test_mm256_cvttspd_epu64 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2uqqs.round.256(<4 x double> + return _mm256_cvttspd_epu64(A); +} + +__m256i test_mm256_mask_cvttspd_epu64(__m256i W,__mmask8 U, __m256d A){ +// CHECK-LABEL: @test_mm256_mask_cvttspd_epu64 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2uqqs.round.256(<4 x double> + return _mm256_mask_cvttspd_epu64(W,U, A); +} + +__m256i test_mm256_maskz_cvttspd_epu64(__mmask8 U, __m256d A){ +// CHECK-LABEL: @test_mm256_maskz_cvttspd_epu64 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2uqqs.round.256(<4 x double> + return _mm256_maskz_cvttspd_epu64(U, A); +} + +__m256i test_mm256_cvtts_roundpd_epu64(__m256d A){ +// CHECK-LABEL: @test_mm256_cvtts_roundpd_epu64 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2uqqs.round.256(<4 x double> + return _mm256_cvtts_roundpd_epu64(A,_MM_FROUND_NEARBYINT ); +} + +__m256i test_mm256_mask_cvtts_roundpd_epu64(__m256i W,__mmask8 U, __m256d A){ +// CHECK-LABEL: @test_mm256_mask_cvtts_roundpd_epu64 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2uqqs.round.256(<4 x double> + return _mm256_mask_cvtts_roundpd_epu64(W,U,A,_MM_FROUND_NEARBYINT ); +} + +__m256i test_mm256_maskz_cvtts_roundpd_epu64(__mmask8 U, __m256d A){ +// CHECK-LABEL: @test_mm256_maskz_cvtts_roundpd_epu64 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2uqqs.round.256(<4 x double> + return _mm256_maskz_cvtts_roundpd_epu64(U,A,_MM_FROUND_NEARBYINT ); +} + +// 128 bit +__m128i test_mm_cvttsps_epi64(__m128 A){ + // CHECK-LABEL: @test_mm_cvttsps_epi64 + // CHECK: @llvm.x86.avx10.mask.vcvttps2qqs.128(<4 x float> + return _mm_cvttsps_epi64(A); +} + +__m128i test_mm_mask_cvttsps_epi64(__m128i W, __mmask8 U, __m128 A){ + // CHECK-LABEL: @test_mm_mask_cvttsps_epi64 + // CHECK: @llvm.x86.avx10.mask.vcvttps2qqs.128(<4 x float> + return _mm_mask_cvttsps_epi64(W, U, A); +} + +__m128i test_mm_maskz_cvttsps_epi64(__mmask8 U,__m128 A){ + // CHECK-LABEL: @test_mm_maskz_cvttsps_epi64 + // CHECK: @llvm.x86.avx10.mask.vcvttps2qqs.128(<4 x float> + return _mm_maskz_cvttsps_epi64(U, A); +} + +__m128i test_mm_cvttsps_epu64(__m128 A){ + // CHECK-LABEL: @test_mm_cvttsps_epu64 + // CHECK: @llvm.x86.avx10.mask.vcvttps2uqqs.128(<4 x float> + return _mm_cvttsps_epu64(A); +} + +__m128i test_mm_mask_cvttsps_epu64(__m128i W, __mmask8 U, __m128 A){ + // CHECK-LABEL: @test_mm_mask_cvttsps_epu64 + // CHECK: @llvm.x86.avx10.mask.vcvttps2uqqs.128(<4 x float> + return _mm_mask_cvttsps_epu64(W, U, A); +} + +__m128i test_mm_maskz_cvttsps_epu64(__mmask8 U,__m128 A){ + // CHECK-LABEL: @test_mm_maskz_cvttsps_epu64 + // CHECK: @llvm.x86.avx10.mask.vcvttps2uqqs.128(<4 x float> + return _mm_maskz_cvttsps_epu64(U, A); +} + +__m256i test_mm256_cvttsps_epi64(__m128 A){ +// CHECK-LABEL: @test_mm256_cvttsps_epi64 +// CHECK: @llvm.x86.avx10.mask.vcvttps2qqs.round.256(<4 x float> + return _mm256_cvttsps_epi64(A); +} + +__m256i test_mm256_mask_cvttsps_epi64(__m256i W,__mmask8 U, __m128 A){ +// CHECK-LABEL: @test_mm256_mask_cvttsps_epi64 +// CHECK: @llvm.x86.avx10.mask.vcvttps2qqs.round.256(<4 x float> + return _mm256_mask_cvttsps_epi64(W,U, A); +} + +__m256i test_mm256_maskz_cvttsps_epi64(__mmask8 U, __m128 A){ +// CHECK-LABEL: @test_mm256_maskz_cvttsps_epi64 +// CHECK: @llvm.x86.avx10.mask.vcvttps2qqs.round.256(<4 x float> + return _mm256_maskz_cvttsps_epi64(U, A); +} + +__m256i test_mm256_cvtts_roundps_epi64(__m128 A){ +// CHECK-LABEL: 
@test_mm256_cvtts_roundps_epi64 +// CHECK: @llvm.x86.avx10.mask.vcvttps2qqs.round.256(<4 x float> + return _mm256_cvtts_roundps_epi64(A, _MM_FROUND_NEARBYINT ); +} + +__m256i test_mm256_mask_cvtts_roundps_epi64(__m256i W,__mmask8 U, __m128 A){ +// CHECK-LABEL: @test_mm256_mask_cvtts_roundps_epi64 +// CHECK: @llvm.x86.avx10.mask.vcvttps2qqs.round.256(<4 x float> + return _mm256_mask_cvtts_roundps_epi64(W,U,A,_MM_FROUND_NEARBYINT ); +} + +__m256i test_mm256_maskz_cvtts_roundps_epi64(__mmask8 U, __m128 A){ +// CHECK-LABEL: @test_mm256_maskz_cvtts_roundps_epi64 +// CHECK: @llvm.x86.avx10.mask.vcvttps2qqs.round.256(<4 x float> + return _mm256_maskz_cvtts_roundps_epi64(U,A,_MM_FROUND_NEARBYINT ); +} + +__m256i test_mm256_cvttsps_epu64(__m128 A){ +// CHECK-LABEL: @test_mm256_cvttsps_epu64 +// CHECK: @llvm.x86.avx10.mask.vcvttps2uqqs.round.256(<4 x float> + return _mm256_cvttsps_epu64(A); +} + +__m256i test_mm256_mask_cvttsps_epu64(__m256i W,__mmask8 U, __m128 A){ +// CHECK-LABEL: @test_mm256_mask_cvttsps_epu64 +// CHECK: @llvm.x86.avx10.mask.vcvttps2uqqs.round.256(<4 x float> + return _mm256_mask_cvttsps_epu64(W,U, A); +} + +__m256i test_mm256_maskz_cvttsps_epu64(__mmask8 U, __m128 A){ +// CHECK-LABEL: @test_mm256_maskz_cvttsps_epu64 +// CHECK: @llvm.x86.avx10.mask.vcvttps2uqqs.round.256(<4 x float> + return _mm256_maskz_cvttsps_epu64(U, A); +} + +__m256i test_mm256_cvtts_roundps_epu64(__m128 A){ +// CHECK-LABEL: @test_mm256_cvtts_roundps_epu64 +// CHECK: @llvm.x86.avx10.mask.vcvttps2uqqs.round.256(<4 x float> + return _mm256_cvtts_roundps_epu64(A, _MM_FROUND_NEARBYINT ); +} + +__m256i test_mm256_mask_cvtts_roundps_epu64(__m256i W,__mmask8 U, __m128 A){ +// CHECK-LABEL: @test_mm256_mask_cvtts_roundps_epu64 +// CHECK: @llvm.x86.avx10.mask.vcvttps2uqqs.round.256(<4 x float> + return _mm256_mask_cvtts_roundps_epu64(W,U,A,_MM_FROUND_NEARBYINT ); +} + +__m256i test_mm256_maskz_cvtts_roundps_epu64(__mmask8 U, __m128 A){ +// CHECK-LABEL: @test_mm256_maskz_cvtts_roundps_epu64 +// CHECK: @llvm.x86.avx10.mask.vcvttps2uqqs.round.256(<4 x float> + return _mm256_maskz_cvtts_roundps_epu64(U,A,_MM_FROUND_NEARBYINT ); +} diff --git a/clang/test/CodeGen/X86/avx10_2satcvtds-builtins.c b/clang/test/CodeGen/X86/avx10_2satcvtds-builtins.c new file mode 100644 index 0000000..bb90f6a --- /dev/null +++ b/clang/test/CodeGen/X86/avx10_2satcvtds-builtins.c @@ -0,0 +1,225 @@ +// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=i386 -target-feature +avx10.2-256 -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK,X86 +// RUN: %clang_cc1 -flax-vector-conversions=none -ffreestanding %s -triple=x86_64 -target-feature +avx10.2-256 -emit-llvm -o - | FileCheck %s --check-prefixes=CHECK,X64 + +#include <immintrin.h> +#include <stddef.h> + +__m128i test_mm_cvttspd_epi32(__m128d A){ +// CHECK-LABEL: @test_mm_cvttspd_epi32 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2dqs.128(<2 x double> + return _mm_cvttspd_epi32(A); +} + +__m128i test_mm_mask_cvttspd_epi32(__m128i W, __mmask8 U, __m128d A){ +// CHECK-LABEL: @test_mm_mask_cvttspd_epi32 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2dqs.128(<2 x double> + return _mm_mask_cvttspd_epi32(W,U,A); +} + +__m128i test_mm_maskz_cvttspd_epi32( __mmask8 U, __m128d A){ +// CHECK-LABEL: @test_mm_maskz_cvttspd_epi32( +// CHECK: @llvm.x86.avx10.mask.vcvttpd2dqs.128(<2 x double> + return _mm_maskz_cvttspd_epi32(U,A); +} + +__m128i test_mm256_cvttspd_epi32(__m256d A){ +// CHECK-LABEL: @test_mm256_cvttspd_epi32 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2dqs.round.256(<4 x double> + return 
_mm256_cvttspd_epi32(A); +} + +__m128i test_mm256_mask_cvttspd_epi32(__m128i W,__mmask8 U, __m256d A){ +// CHECK-LABEL: @test_mm256_mask_cvttspd_epi32 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2dqs.round.256(<4 x double> + return _mm256_mask_cvttspd_epi32(W,U,A); +} + +__m128i test_mm256_maskz_cvttspd_epi32(__mmask8 U, __m256d A){ +// CHECK-LABEL: @test_mm256_maskz_cvttspd_epi32 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2dqs.round.256(<4 x double> + return _mm256_maskz_cvttspd_epi32(U,A); +} + +__m128i test_mm256_cvtts_roundpd_epi32(__m256d A){ +// CHECK-LABEL: @test_mm256_cvtts_roundpd_epi32 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2dqs.round.256(<4 x double> + return _mm256_cvtts_roundpd_epi32(A, _MM_FROUND_NEARBYINT); +} + +__m128i test_mm256_mask_cvtts_roundpd_epi32(__m128i W,__mmask8 U, __m256d A){ +// CHECK-LABEL: @test_mm256_mask_cvtts_roundpd_epi32 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2dqs.round.256(<4 x double> + return _mm256_mask_cvtts_roundpd_epi32(W,U,A,_MM_FROUND_NEARBYINT); +} + +__m128i test_mm256_maskz_cvtts_roundpd_epi32(__mmask8 U, __m256d A){ +// CHECK-LABEL: @test_mm256_maskz_cvtts_roundpd_epi32 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2dqs.round.256(<4 x double> + return _mm256_maskz_cvtts_roundpd_epi32(U,A,_MM_FROUND_NEARBYINT); +} + +__m128i test_mm_cvttspd_epu32(__m128d A){ +// CHECK-LABEL: @test_mm_cvttspd_epu32 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2udqs.128(<2 x double> + return _mm_cvttspd_epu32(A); +} + +__m128i test_mm_mask_cvttspd_epu32(__m128i W, __mmask8 U, __m128d A){ +// CHECK-LABEL: @test_mm_mask_cvttspd_epu32 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2udqs.128(<2 x double> + return _mm_mask_cvttspd_epu32(W,U,A); +} + +__m128i test_mm_maskz_cvttspd_epu32( __mmask8 U, __m128d A){ +// CHECK-LABEL: @test_mm_maskz_cvttspd_epu32 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2udqs.128(<2 x double> + return _mm_maskz_cvttspd_epu32(U,A); +} + + +__m128i test_mm256_cvttspd_epu32(__m256d A){ +// CHECK-LABEL: @test_mm256_cvttspd_epu32 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2udqs.round.256(<4 x double> + return _mm256_cvttspd_epu32(A); +} + +__m128i test_mm256_mask_cvttspd_epu32(__m128i W,__mmask8 U, __m256d A){ +// CHECK-LABEL: @test_mm256_mask_cvttspd_epu32 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2udqs.round.256(<4 x double> + return _mm256_mask_cvttspd_epu32(W,U,A); +} + +__m128i test_mm256_maskz_cvttspd_epu32(__mmask8 U, __m256d A){ +// CHECK-LABEL: @test_mm256_maskz_cvttspd_epu32 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2udqs.round.256(<4 x double> + return _mm256_maskz_cvttspd_epu32(U,A); +} + +__m128i test_mm256_cvtts_roundpd_epu32(__m256d A){ +// CHECK-LABEL: @test_mm256_cvtts_roundpd_epu32 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2udqs.round.256(<4 x double> + return _mm256_cvtts_roundpd_epu32(A, _MM_FROUND_NEARBYINT); +} + +__m128i test_mm256_mask_cvtts_roundpd_epu32(__m128i W,__mmask8 U, __m256d A){ +// CHECK-LABEL: @test_mm256_mask_cvtts_roundpd_epu32 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2udqs.round.256(<4 x double> + return _mm256_mask_cvtts_roundpd_epu32(W,U,A,_MM_FROUND_NEARBYINT); +} + +__m128i test_mm256_maskz_cvtts_roundpd_epu32(__mmask8 U, __m256d A){ +// CHECK-LABEL: @test_mm256_maskz_cvtts_roundpd_epu32 +// CHECK: @llvm.x86.avx10.mask.vcvttpd2udqs.round.256(<4 x double> + return _mm256_maskz_cvtts_roundpd_epu32(U,A,_MM_FROUND_NEARBYINT); +} + +__m128i test_mm_cvttsps_epi32(__m128 A){ +// CHECK-LABEL: @test_mm_cvttsps_epi32 +// CHECK: @llvm.x86.avx10.mask.vcvttps2dqs.128(<4 x float> + return _mm_cvttsps_epi32(A); +} + +__m128i test_mm_mask_cvttsps_epi32(__m128i W, 
__mmask8 U, __m128 A){ +// CHECK-LABEL: @test_mm_mask_cvttsps_epi32 +// CHECK: @llvm.x86.avx10.mask.vcvttps2dqs.128(<4 x float> + return _mm_mask_cvttsps_epi32(W,U,A); +} + +__m128i test_mm_maskz_cvttsps_epi32( __mmask8 U, __m128 A){ +// CHECK-LABEL: @test_mm_maskz_cvttsps_epi32 +// CHECK: @llvm.x86.avx10.mask.vcvttps2dqs.128(<4 x float> + return _mm_maskz_cvttsps_epi32(U,A); +} + +__m256i test_mm256_cvttsps_epi32(__m256 A){ +// CHECK-LABEL: @test_mm256_cvttsps_epi32 +// CHECK: @llvm.x86.avx10.mask.vcvttps2dqs.round.256(<8 x float> + return _mm256_cvttsps_epi32(A); +} + +__m256i test_mm256_mask_cvttsps_epi32(__m256i W,__mmask8 U, __m256 A){ +// CHECK-LABEL: @test_mm256_mask_cvttsps_epi32 +// CHECK: @llvm.x86.avx10.mask.vcvttps2dqs.round.256(<8 x float> + return _mm256_mask_cvttsps_epi32(W,U,A); +} + +__m256i test_mm256_maskz_cvttsps_epi32(__mmask8 U, __m256 A){ +// CHECK-LABEL: @test_mm256_maskz_cvttsps_epi32 +// CHECK: @llvm.x86.avx10.mask.vcvttps2dqs.round.256(<8 x float> + return _mm256_maskz_cvttsps_epi32(U,A); +} + +__m256i test_mm256_cvtts_roundps_epi32(__m256 A){ +// CHECK-LABEL: @test_mm256_cvtts_roundps_epi32 +// CHECK: @llvm.x86.avx10.mask.vcvttps2dqs.round.256(<8 x float> + return _mm256_cvtts_roundps_epi32(A, _MM_FROUND_NEARBYINT); +} + +__m256i test_mm256_mask_cvtts_roundps_epi32(__m256i W,__mmask8 U, __m256 A){ +// CHECK-LABEL: @test_mm256_mask_cvtts_roundps_epi32 +// CHECK: @llvm.x86.avx10.mask.vcvttps2dqs.round.256(<8 x float> + return _mm256_mask_cvtts_roundps_epi32(W,U,A,_MM_FROUND_NEARBYINT); +} + +__m256i test_mm256_maskz_cvtts_roundps_epi32(__mmask8 U, __m256 A){ +// CHECK-LABEL: @test_mm256_maskz_cvtts_roundps_epi32 +// CHECK: @llvm.x86.avx10.mask.vcvttps2dqs.round.256(<8 x float> + return _mm256_maskz_cvtts_roundps_epi32(U,A,_MM_FROUND_NEARBYINT); +} + +__m128i test_mm_cvttsps_epu32(__m128 A){ +// CHECK-LABEL: @test_mm_cvttsps_epu32 +// CHECK: @llvm.x86.avx10.mask.vcvttps2udqs.128(<4 x float> + return _mm_cvttsps_epu32(A); +} + +__m128i test_mm_mask_cvttsps_epu32(__m128i W, __mmask8 U, __m128 A){ +// CHECK-LABEL: @test_mm_mask_cvttsps_epu32 +// CHECK: @llvm.x86.avx10.mask.vcvttps2udqs.128(<4 x float> + return _mm_mask_cvttsps_epu32(W,U,A); +} + +__m128i test_mm_maskz_cvttsps_epu32( __mmask8 U, __m128 A){ +// CHECK-LABEL: @test_mm_maskz_cvttsps_epu32 +// CHECK: @llvm.x86.avx10.mask.vcvttps2udqs.128(<4 x float> + return _mm_maskz_cvttsps_epu32(U,A); +} + +__m256i test_mm256_cvttsps_epu32(__m256 A){ +// CHECK-LABEL: @test_mm256_cvttsps_epu32 +// CHECK: @llvm.x86.avx10.mask.vcvttps2udqs.round.256(<8 x float> + return _mm256_cvttsps_epu32(A); +} + +__m256i test_mm256_mask_cvttsps_epu32(__m256i W,__mmask8 U, __m256 A){ +// CHECK-LABEL: @test_mm256_mask_cvttsps_epu32 +// CHECK: @llvm.x86.avx10.mask.vcvttps2udqs.round.256(<8 x float> + return _mm256_mask_cvttsps_epu32(W,U,A); +} + +__m256i test_mm256_maskz_cvttsps_epu32(__mmask8 U, __m256 A){ +// CHECK-LABEL: @test_mm256_maskz_cvttsps_epu32 +// CHECK: @llvm.x86.avx10.mask.vcvttps2udqs.round.256(<8 x float> + return _mm256_maskz_cvttsps_epu32(U,A); +} + +__m256i test_mm256_cvtts_roundps_epu32(__m256 A){ +// CHECK-LABEL: @test_mm256_cvtts_roundps_epu32 +// CHECK: @llvm.x86.avx10.mask.vcvttps2udqs.round.256(<8 x float> + return _mm256_cvtts_roundps_epu32(A, _MM_FROUND_NEARBYINT); +} + +__m256i test_mm256_mask_cvtts_roundps_epu32(__m256i W,__mmask8 U, __m256 A){ +// CHECK-LABEL: @test_mm256_mask_cvtts_roundps_epu32 +// CHECK: @llvm.x86.avx10.mask.vcvttps2udqs.round.256(<8 x float> + return 
_mm256_mask_cvtts_roundps_epu32(W,U,A,_MM_FROUND_NEARBYINT); +} + +__m256i test_mm256_maskz_cvtts_roundps_epu32(__mmask8 U, __m256 A){ +// CHECK-LABEL: @test_mm256_maskz_cvtts_roundps_epu32 +// CHECK: @llvm.x86.avx10.mask.vcvttps2udqs.round.256(<8 x float> + return _mm256_maskz_cvtts_roundps_epu32(U,A,_MM_FROUND_NEARBYINT); +} + +// X64: {{.*}} +// X86: {{.*}} diff --git a/clang/test/CodeGenCXX/const-base-cast.cpp b/clang/test/CodeGenCXX/const-base-cast.cpp index bb08b9d..7f2c66e 100644 --- a/clang/test/CodeGenCXX/const-base-cast.cpp +++ b/clang/test/CodeGenCXX/const-base-cast.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple %itanium_abi_triple -emit-llvm %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple %itanium_abi_triple -emit-llvm %s -o - -fexperimental-new-constant-interpreter | FileCheck %s // Check that the following construct, which is similar to one which occurs // in Firefox, is folded correctly. diff --git a/clang/test/CodeGenHLSL/builtins/step.hlsl b/clang/test/CodeGenHLSL/builtins/step.hlsl new file mode 100644 index 0000000..442f493 --- /dev/null +++ b/clang/test/CodeGenHLSL/builtins/step.hlsl @@ -0,0 +1,84 @@ +// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN: dxil-pc-shadermodel6.3-library %s -fnative-half-type \
+// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN: --check-prefixes=CHECK,NATIVE_HALF \
+// RUN: -DFNATTRS=noundef -DTARGET=dx
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN: dxil-pc-shadermodel6.3-library %s -emit-llvm -disable-llvm-passes \
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF \
+// RUN: -DFNATTRS=noundef -DTARGET=dx
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN: spirv-unknown-vulkan-compute %s -fnative-half-type \
+// RUN: -emit-llvm -disable-llvm-passes -o - | FileCheck %s \
+// RUN: --check-prefixes=CHECK,NATIVE_HALF \
+// RUN: -DFNATTRS="spir_func noundef" -DTARGET=spv
+// RUN: %clang_cc1 -finclude-default-header -x hlsl -triple \
+// RUN: spirv-unknown-vulkan-compute %s -emit-llvm -disable-llvm-passes \
+// RUN: -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF \
+// RUN: -DFNATTRS="spir_func noundef" -DTARGET=spv
+
+// NATIVE_HALF: define [[FNATTRS]] half @
+// NATIVE_HALF: call half @llvm.[[TARGET]].step.f16(half
+// NO_HALF: call float @llvm.[[TARGET]].step.f32(float
+// NATIVE_HALF: ret half
+// NO_HALF: ret float
+half test_step_half(half p0, half p1)
+{
+ return step(p0, p1);
+}
+// NATIVE_HALF: define [[FNATTRS]] <2 x half> @
+// NATIVE_HALF: call <2 x half> @llvm.[[TARGET]].step.v2f16(<2 x half>
+// NO_HALF: call <2 x float> @llvm.[[TARGET]].step.v2f32(<2 x float>
+// NATIVE_HALF: ret <2 x half> %hlsl.step
+// NO_HALF: ret <2 x float> %hlsl.step
+half2 test_step_half2(half2 p0, half2 p1)
+{
+ return step(p0, p1);
+}
+// NATIVE_HALF: define [[FNATTRS]] <3 x half> @
+// NATIVE_HALF: call <3 x half> @llvm.[[TARGET]].step.v3f16(<3 x half>
+// NO_HALF: call <3 x float> @llvm.[[TARGET]].step.v3f32(<3 x float>
+// NATIVE_HALF: ret <3 x half> %hlsl.step
+// NO_HALF: ret <3 x float> %hlsl.step
+half3 test_step_half3(half3 p0, half3 p1)
+{
+ return step(p0, p1);
+}
+// NATIVE_HALF: define [[FNATTRS]] <4 x half> @
+// NATIVE_HALF: call <4 x half> @llvm.[[TARGET]].step.v4f16(<4 x half>
+// NO_HALF: call <4 x float> @llvm.[[TARGET]].step.v4f32(<4 x float>
+// NATIVE_HALF: ret <4 x half> %hlsl.step
+// NO_HALF: ret <4 x float> %hlsl.step
+half4 test_step_half4(half4 p0, half4 p1)
+{
+ return step(p0, p1);
+}
+
+// CHECK: define [[FNATTRS]] float @
+// CHECK: call float @llvm.[[TARGET]].step.f32(float
+// CHECK: ret float
+float test_step_float(float p0, float p1)
+{
+ return step(p0, p1);
+}
+// CHECK: define [[FNATTRS]] <2 x float> @
+// CHECK: %hlsl.step = call <2 x float> @llvm.[[TARGET]].step.v2f32(
+// CHECK: ret <2 x float> %hlsl.step
+float2 test_step_float2(float2 p0, float2 p1)
+{
+ return step(p0, p1);
+}
+// CHECK: define [[FNATTRS]] <3 x float> @
+// CHECK: %hlsl.step = call <3 x float> @llvm.[[TARGET]].step.v3f32(
+// CHECK: ret <3 x float> %hlsl.step
+float3 test_step_float3(float3 p0, float3 p1)
+{
+ return step(p0, p1);
+}
+// CHECK: define [[FNATTRS]] <4 x float> @
+// CHECK: %hlsl.step = call <4 x float> @llvm.[[TARGET]].step.v4f32(
+// CHECK: ret <4 x float> %hlsl.step
+float4 test_step_float4(float4 p0, float4 p1)
+{
+ return step(p0, p1);
+}
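The checks above pin the lowering of the new step builtin onto the target intrinsics (@llvm.dx.step / @llvm.spv.step). For reference, HLSL's step(edge, x) is a componentwise threshold that returns 1.0 where x >= edge and 0.0 otherwise; a minimal usage sketch under those standard semantics follows (the helper function is hypothetical and not part of this patch):

    // Componentwise mask: 1.0 where v >= 0.5, else 0.0.
    float4 half_threshold(float4 v)
    {
        return step(float4(0.5, 0.5, 0.5, 0.5), v);
    }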
diff --git a/clang/test/CodeGenObjC/boxing.m b/clang/test/CodeGenObjC/boxing.m index 3f857e0..c124f17 100644 --- a/clang/test/CodeGenObjC/boxing.m +++ b/clang/test/CodeGenObjC/boxing.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-apple-darwin10 -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -emit-llvm -o - %s -fexperimental-new-constant-interpreter | FileCheck %s typedef long NSInteger; typedef unsigned long NSUInteger; diff --git a/clang/test/SemaHLSL/BuiltIns/step-errors.hlsl b/clang/test/SemaHLSL/BuiltIns/step-errors.hlsl new file mode 100644 index 0000000..8235852 --- /dev/null +++ b/clang/test/SemaHLSL/BuiltIns/step-errors.hlsl @@ -0,0 +1,31 @@ +// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -disable-llvm-passes -verify -verify-ignore-unexpected
+
+void test_too_few_arg()
+{
+ return __builtin_hlsl_step();
+ // expected-error@-1 {{too few arguments to function call, expected 2, have 0}}
+}
+
+void test_too_many_arg(float2 p0)
+{
+ return __builtin_hlsl_step(p0, p0, p0);
+ // expected-error@-1 {{too many arguments to function call, expected 2, have 3}}
+}
+
+bool builtin_bool_to_float_type_promotion(bool p1)
+{
+ return __builtin_hlsl_step(p1, p1);
+  // expected-error@-1 {{passing 'bool' to parameter of incompatible type 'float'}}
+}
+
+bool builtin_step_int_to_float_promotion(int p1)
+{
+ return __builtin_hlsl_step(p1, p1);
+ // expected-error@-1 {{passing 'int' to parameter of incompatible type 'float'}}
+}
+
+bool2 builtin_step_int2_to_float2_promotion(int2 p1)
+{
+ return __builtin_hlsl_step(p1, p1);
+ // expected-error@-1 {{passing 'int2' (aka 'vector<int, 2>') to parameter of incompatible type '__attribute__((__vector_size__(2 * sizeof(float)))) float' (vector of 2 'float' values)}}
+}
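The diagnostics above establish that __builtin_hlsl_step rejects bool and integer operands rather than promoting them to float. A sketch of the corresponding well-formed calls, assuming explicit casts to the float types the builtin expects (illustrative only, not part of this patch):

    // Explicit casts satisfy the builtin's float parameter types.
    float step_int_ok(int p1)
    {
        return __builtin_hlsl_step((float)p1, (float)p1);
    }

    float2 step_int2_ok(int2 p1)
    {
        return __builtin_hlsl_step((float2)p1, (float2)p1);
    }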
diff --git a/clang/test/SemaTemplate/pack-deduction.cpp b/clang/test/SemaTemplate/pack-deduction.cpp index e427098..28fb127 100644 --- a/clang/test/SemaTemplate/pack-deduction.cpp +++ b/clang/test/SemaTemplate/pack-deduction.cpp @@ -185,3 +185,17 @@ void Run() { Outer<void>::Inner<0>().Test(1,1); } } + +namespace GH107560 { +int bar(...); + +template <int> struct Int {}; + +template <class ...T> +constexpr auto foo(T... x) -> decltype(bar(T(x)...)) { return 10; } + +template <class ...T> +constexpr auto baz(Int<foo<T>(T())>... x) -> int { return 1; } + +static_assert(baz<Int<1>, Int<2>, Int<3>>(Int<10>(), Int<10>(), Int<10>()) == 1, ""); +} diff --git a/clang/tools/CMakeLists.txt b/clang/tools/CMakeLists.txt index 4885afc..f588a36 100644 --- a/clang/tools/CMakeLists.txt +++ b/clang/tools/CMakeLists.txt @@ -5,7 +5,6 @@ add_clang_subdirectory(driver) add_clang_subdirectory(apinotes-test) add_clang_subdirectory(clang-diff) add_clang_subdirectory(clang-format) -add_clang_subdirectory(clang-format-vs) add_clang_subdirectory(clang-fuzzer) add_clang_subdirectory(clang-import-test) add_clang_subdirectory(clang-linker-wrapper) diff --git a/clang/tools/clang-format-vs/.gitignore b/clang/tools/clang-format-vs/.gitignore deleted file mode 100644 index 270d840..0000000 --- a/clang/tools/clang-format-vs/.gitignore +++ /dev/null @@ -1,11 +0,0 @@ -# Visual Studio files -.vs/ -*.user -/packages/ -/ClangFormat/obj/ -/ClangFormat/bin/ - -# Generated and copied files -/ClangFormat/Key.snk -/ClangFormat/clang-format.exe -/ClangFormat/source.extension.vsixmanifest diff --git a/clang/tools/clang-format-vs/CMakeLists.txt b/clang/tools/clang-format-vs/CMakeLists.txt deleted file mode 100644 index 1d44a47..0000000 --- a/clang/tools/clang-format-vs/CMakeLists.txt +++ /dev/null @@ -1,33 +0,0 @@ -option(BUILD_CLANG_FORMAT_VS_PLUGIN "Build clang-format VS plugin" OFF) -if (BUILD_CLANG_FORMAT_VS_PLUGIN) - add_custom_target(clang_format_exe_for_vsix - ${CMAKE_COMMAND} -E copy_if_different - "${LLVM_TOOLS_BINARY_DIR}/clang-format.exe" - "${CMAKE_CURRENT_SOURCE_DIR}/ClangFormat/clang-format.exe" - DEPENDS clang-format) - - # Build number added to Clang version to ensure that new VSIX can be upgraded - string(TIMESTAMP CLANG_FORMAT_VSIX_BUILD %y%m%d%H%M UTC) - - if (NOT CLANG_FORMAT_VS_VERSION) - set(CLANG_FORMAT_VS_VERSION "${LLVM_VERSION_MAJOR}.${LLVM_VERSION_MINOR}.${LLVM_VERSION_PATCH}.${CLANG_FORMAT_VSIX_BUILD}") - endif() - - configure_file("source.extension.vsixmanifest.in" - "${CMAKE_CURRENT_SOURCE_DIR}/ClangFormat/source.extension.vsixmanifest") - - find_program(NUGET_EXE nuget PATHS ${NUGET_EXE_DIR}) - if (NOT NUGET_EXE) - message(FATAL_ERROR "Could not find nuget.exe. 
Download from https://www.nuget.org/nuget.exe" - " and add parent directory to PATH or pass it via NUGET_EXE_DIR var.") - endif() - - add_custom_target(clang_format_vsix ALL - COMMAND ${NUGET_EXE} restore "${CMAKE_CURRENT_SOURCE_DIR}/ClangFormat.sln" - COMMAND devenv "${CMAKE_CURRENT_SOURCE_DIR}/ClangFormat.sln" /Build Release - DEPENDS clang_format_exe_for_vsix "${CMAKE_CURRENT_SOURCE_DIR}/ClangFormat/source.extension.vsixmanifest" - COMMAND ${CMAKE_COMMAND} -E copy_if_different - "${CMAKE_CURRENT_SOURCE_DIR}/ClangFormat/bin/Release/ClangFormat.vsix" - "${LLVM_TOOLS_BINARY_DIR}/ClangFormat.vsix" - DEPENDS clang_format_exe_for_vsix) -endif() diff --git a/clang/tools/clang-format-vs/ClangFormat.sln b/clang/tools/clang-format-vs/ClangFormat.sln deleted file mode 100644 index 46d742b..0000000 --- a/clang/tools/clang-format-vs/ClangFormat.sln +++ /dev/null @@ -1,22 +0,0 @@ -
-Microsoft Visual Studio Solution File, Format Version 12.00
-# Visual Studio 15
-VisualStudioVersion = 15.0.26228.12
-MinimumVisualStudioVersion = 10.0.40219.1
-Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "ClangFormat", "ClangFormat\ClangFormat.csproj", "{7FD1783E-2D31-4D05-BF23-6EBE1B42B608}"
-EndProject
-Global
- GlobalSection(SolutionConfigurationPlatforms) = preSolution
- Debug|Any CPU = Debug|Any CPU
- Release|Any CPU = Release|Any CPU
- EndGlobalSection
- GlobalSection(ProjectConfigurationPlatforms) = postSolution
- {7FD1783E-2D31-4D05-BF23-6EBE1B42B608}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
- {7FD1783E-2D31-4D05-BF23-6EBE1B42B608}.Debug|Any CPU.Build.0 = Debug|Any CPU
- {7FD1783E-2D31-4D05-BF23-6EBE1B42B608}.Release|Any CPU.ActiveCfg = Release|Any CPU
- {7FD1783E-2D31-4D05-BF23-6EBE1B42B608}.Release|Any CPU.Build.0 = Release|Any CPU
- EndGlobalSection
- GlobalSection(SolutionProperties) = preSolution
- HideSolutionNode = FALSE
- EndGlobalSection
-EndGlobal
diff --git a/clang/tools/clang-format-vs/ClangFormat/ClangFormat.csproj b/clang/tools/clang-format-vs/ClangFormat/ClangFormat.csproj deleted file mode 100644 index e5b7ec0..0000000 --- a/clang/tools/clang-format-vs/ClangFormat/ClangFormat.csproj +++ /dev/null @@ -1,261 +0,0 @@ -<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003" ToolsVersion="4.0">
- <Import Project="$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props" Condition="Exists('$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props')" />
- <PropertyGroup>
- <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
- <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
- <SchemaVersion>2.0</SchemaVersion>
- <ProjectGuid>{7FD1783E-2D31-4D05-BF23-6EBE1B42B608}</ProjectGuid>
- <ProjectTypeGuids>{82b43b9b-a64c-4715-b499-d71e9ca2bd60};{60dc8134-eba5-43b8-bcc9-bb4bc16c2548};{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}</ProjectTypeGuids>
- <OutputType>Library</OutputType>
- <AppDesignerFolder>Properties</AppDesignerFolder>
- <RootNamespace>LLVM.ClangFormat</RootNamespace>
- <AssemblyName>ClangFormat</AssemblyName>
- <SignAssembly>true</SignAssembly>
- <AssemblyOriginatorKeyFile>Key.snk</AssemblyOriginatorKeyFile>
- <TargetFrameworkVersion>v4.0</TargetFrameworkVersion>
- <MinimumVisualStudioVersion>15.0</MinimumVisualStudioVersion>
- <FileUpgradeFlags>
- </FileUpgradeFlags>
- <UpgradeBackupLocation>
- </UpgradeBackupLocation>
- <OldToolsVersion>4.0</OldToolsVersion>
- <PublishUrl>publish\</PublishUrl>
- <Install>true</Install>
- <InstallFrom>Disk</InstallFrom>
- <UpdateEnabled>false</UpdateEnabled>
- <UpdateMode>Foreground</UpdateMode>
- <UpdateInterval>7</UpdateInterval>
- <UpdateIntervalUnits>Days</UpdateIntervalUnits>
- <UpdatePeriodically>false</UpdatePeriodically>
- <UpdateRequired>false</UpdateRequired>
- <MapFileExtensions>true</MapFileExtensions>
- <ApplicationRevision>0</ApplicationRevision>
- <ApplicationVersion>1.0.0.%2a</ApplicationVersion>
- <IsWebBootstrapper>false</IsWebBootstrapper>
- <UseApplicationTrust>false</UseApplicationTrust>
- <BootstrapperEnabled>true</BootstrapperEnabled>
- <TargetFrameworkProfile />
- </PropertyGroup>
- <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
- <DebugSymbols>true</DebugSymbols>
- <DebugType>full</DebugType>
- <Optimize>false</Optimize>
- <OutputPath>bin\Debug\</OutputPath>
- <DefineConstants>DEBUG;TRACE</DefineConstants>
- <ErrorReport>prompt</ErrorReport>
- <WarningLevel>4</WarningLevel>
- <Prefer32Bit>false</Prefer32Bit>
- </PropertyGroup>
- <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
- <DebugType>pdbonly</DebugType>
- <Optimize>true</Optimize>
- <OutputPath>bin\Release\</OutputPath>
- <DefineConstants>TRACE</DefineConstants>
- <ErrorReport>prompt</ErrorReport>
- <WarningLevel>4</WarningLevel>
- <RunCodeAnalysis>true</RunCodeAnalysis>
- <Prefer32Bit>false</Prefer32Bit>
- </PropertyGroup>
- <ItemGroup>
- <Reference Include="envdte, Version=8.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a">
- <EmbedInteropTypes>True</EmbedInteropTypes>
- </Reference>
- <Reference Include="envdte80, Version=8.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a">
- <EmbedInteropTypes>True</EmbedInteropTypes>
- </Reference>
- <Reference Include="Microsoft.CSharp" />
- <Reference Include="Microsoft.VisualStudio.CoreUtility, Version=10.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL">
- <HintPath>..\packages\VSSDK.CoreUtility.10.0.4\lib\net40\Microsoft.VisualStudio.CoreUtility.dll</HintPath>
- <Private>False</Private>
- </Reference>
- <Reference Include="Microsoft.VisualStudio.Editor, Version=10.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL">
- <HintPath>..\packages\VSSDK.Editor.10.0.4\lib\net40\Microsoft.VisualStudio.Editor.dll</HintPath>
- <Private>False</Private>
- </Reference>
- <Reference Include="Microsoft.VisualStudio.OLE.Interop, Version=7.1.40304.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a">
- <HintPath>..\packages\VSSDK.OLE.Interop.7.0.4\lib\net20\Microsoft.VisualStudio.OLE.Interop.dll</HintPath>
- <Private>True</Private>
- <Private>False</Private>
- </Reference>
- <Reference Include="Microsoft.VisualStudio.Shell.10.0, Version=10.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL">
- <HintPath>..\packages\VSSDK.Shell.10.10.0.3\lib\net40\Microsoft.VisualStudio.Shell.10.0.dll</HintPath>
- <Private>False</Private>
- </Reference>
- <Reference Include="Microsoft.VisualStudio.Shell.Immutable.10.0, Version=10.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL">
- <HintPath>..\packages\VSSDK.Shell.Immutable.10.10.0.3\lib\net40\Microsoft.VisualStudio.Shell.Immutable.10.0.dll</HintPath>
- <Private>True</Private>
- </Reference>
- <Reference Include="Microsoft.VisualStudio.Shell.Interop, Version=7.1.40304.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a">
- <HintPath>..\packages\VSSDK.Shell.Interop.7.0.4\lib\net20\Microsoft.VisualStudio.Shell.Interop.dll</HintPath>
- <Private>False</Private>
- </Reference>
- <Reference Include="Microsoft.VisualStudio.Shell.Interop.8.0, Version=8.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a">
- <HintPath>..\packages\VSSDK.Shell.Interop.8.8.0.3\lib\net20\Microsoft.VisualStudio.Shell.Interop.8.0.dll</HintPath>
- <Private>False</Private>
- </Reference>
- <Reference Include="Microsoft.VisualStudio.Shell.Interop.10.0" />
- <Reference Include="Microsoft.VisualStudio.Shell.Interop.9.0, Version=9.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a">
- <HintPath>..\packages\VSSDK.Shell.Interop.9.9.0.3\lib\net20\Microsoft.VisualStudio.Shell.Interop.9.0.dll</HintPath>
- <Private>False</Private>
- </Reference>
- <Reference Include="Microsoft.VisualStudio.Text.Data, Version=10.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL">
- <HintPath>..\packages\VSSDK.Text.10.0.4\lib\net40\Microsoft.VisualStudio.Text.Data.dll</HintPath>
- <Private>False</Private>
- </Reference>
- <Reference Include="Microsoft.VisualStudio.Text.Logic, Version=10.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL">
- <HintPath>..\packages\VSSDK.Text.10.0.4\lib\net40\Microsoft.VisualStudio.Text.Logic.dll</HintPath>
- <Private>False</Private>
- </Reference>
- <Reference Include="Microsoft.VisualStudio.Text.UI, Version=10.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL">
- <HintPath>..\packages\VSSDK.Text.10.0.4\lib\net40\Microsoft.VisualStudio.Text.UI.dll</HintPath>
- <Private>False</Private>
- </Reference>
- <Reference Include="Microsoft.VisualStudio.Text.UI.Wpf, Version=10.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a, processorArchitecture=MSIL">
- <HintPath>..\packages\VSSDK.Text.10.0.4\lib\net40\Microsoft.VisualStudio.Text.UI.Wpf.dll</HintPath>
- <Private>False</Private>
- </Reference>
- <Reference Include="Microsoft.VisualStudio.TextManager.Interop, Version=7.1.40304.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a">
- <Private>False</Private>
- </Reference>
- <Reference Include="Microsoft.VisualStudio.TextManager.Interop.8.0, Version=8.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a">
- <HintPath>..\packages\VSSDK.TextManager.Interop.8.8.0.4\lib\net20\Microsoft.VisualStudio.TextManager.Interop.8.0.dll</HintPath>
- <Private>False</Private>
- </Reference>
- <Reference Include="PresentationCore" />
- <Reference Include="PresentationFramework" />
- <Reference Include="stdole, Version=7.0.3300.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a">
- <HintPath>..\packages\VSSDK.DTE.7.0.3\lib\net20\stdole.dll</HintPath>
- <EmbedInteropTypes>False</EmbedInteropTypes>
- </Reference>
- <Reference Include="System" />
- <Reference Include="System.ComponentModel.Composition" />
- <Reference Include="System.Core" />
- <Reference Include="System.Data" />
- <Reference Include="System.Design" />
- <Reference Include="System.Drawing" />
- <Reference Include="System.Windows.Forms" />
- <Reference Include="System.Xml" />
- <Reference Include="System.Xml.Linq" />
- <Reference Include="WindowsBase" />
- </ItemGroup>
- <ItemGroup>
- <COMReference Include="Microsoft.VisualStudio.CommandBars">
- <Guid>{1CBA492E-7263-47BB-87FE-639000619B15}</Guid>
- <VersionMajor>8</VersionMajor>
- <VersionMinor>0</VersionMinor>
- <Lcid>0</Lcid>
- <WrapperTool>primary</WrapperTool>
- <Isolated>False</Isolated>
- <EmbedInteropTypes>False</EmbedInteropTypes>
- </COMReference>
- <COMReference Include="stdole">
- <Guid>{00020430-0000-0000-C000-000000000046}</Guid>
- <VersionMajor>2</VersionMajor>
- <VersionMinor>0</VersionMinor>
- <Lcid>0</Lcid>
- <WrapperTool>primary</WrapperTool>
- <Isolated>False</Isolated>
- <EmbedInteropTypes>False</EmbedInteropTypes>
- </COMReference>
- </ItemGroup>
- <ItemGroup>
- <Compile Include="Guids.cs" />
- <Compile Include="Resources.Designer.cs">
- <AutoGen>True</AutoGen>
- <DesignTime>True</DesignTime>
- <DependentUpon>Resources.resx</DependentUpon>
- </Compile>
- <Compile Include="GlobalSuppressions.cs" />
- <Compile Include="ClangFormatPackage.cs">
- <SubType>Component</SubType>
- </Compile>
- <Compile Include="Properties\AssemblyInfo.cs" />
- <Compile Include="PkgCmdID.cs" />
- <Compile Include="RunningDocTableEventsDispatcher.cs" />
- <Compile Include="Vsix.cs" />
- </ItemGroup>
- <ItemGroup>
- <EmbeddedResource Include="Resources.resx">
- <Generator>ResXFileCodeGenerator</Generator>
- <LastGenOutput>Resources.Designer.cs</LastGenOutput>
- <SubType>Designer</SubType>
- </EmbeddedResource>
- <EmbeddedResource Include="VSPackage.resx">
- <MergeWithCTO>true</MergeWithCTO>
- <ManifestResourceName>VSPackage</ManifestResourceName>
- </EmbeddedResource>
- </ItemGroup>
- <ItemGroup>
- <None Include="Key.snk" />
- <None Include="packages.config">
- <SubType>Designer</SubType>
- </None>
- <None Include="source.extension.vsixmanifest">
- <SubType>Designer</SubType>
- </None>
- </ItemGroup>
- <ItemGroup>
- <VSCTCompile Include="ClangFormat.vsct">
- <ResourceName>Menus.ctmenu</ResourceName>
- </VSCTCompile>
- </ItemGroup>
- <ItemGroup>
- <None Include="Resources\Images_32bit.bmp" />
- </ItemGroup>
- <ItemGroup>
- <Content Include="clang-format.exe">
- <IncludeInVSIX>true</IncludeInVSIX>
- </Content>
- <Content Include="license.txt">
- <IncludeInVSIX>true</IncludeInVSIX>
- </Content>
- <Content Include="Resources\Package.ico" />
- </ItemGroup>
- <ItemGroup>
- <BootstrapperPackage Include=".NETFramework,Version=v4.0">
- <Visible>False</Visible>
- <ProductName>Microsoft .NET Framework 4 %28x86 and x64%29</ProductName>
- <Install>true</Install>
- </BootstrapperPackage>
- <BootstrapperPackage Include="Microsoft.Net.Client.3.5">
- <Visible>False</Visible>
- <ProductName>.NET Framework 3.5 SP1 Client Profile</ProductName>
- <Install>false</Install>
- </BootstrapperPackage>
- <BootstrapperPackage Include="Microsoft.Net.Framework.3.5.SP1">
- <Visible>False</Visible>
- <ProductName>.NET Framework 3.5 SP1</ProductName>
- <Install>false</Install>
- </BootstrapperPackage>
- <BootstrapperPackage Include="Microsoft.Windows.Installer.4.5">
- <Visible>False</Visible>
- <ProductName>Windows Installer 4.5</ProductName>
- <Install>true</Install>
- </BootstrapperPackage>
- </ItemGroup>
- <PropertyGroup>
- <UseCodebase>true</UseCodebase>
- </PropertyGroup>
- <PropertyGroup>
- <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">10.0</VisualStudioVersion>
- <VSToolsPath Condition="'$(VSToolsPath)' == ''">$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)</VSToolsPath>
- </PropertyGroup>
- <Import Project="$(MSBuildBinPath)\Microsoft.CSharp.targets" />
- <Import Project="$(VSToolsPath)\VSSDK\Microsoft.VsSDK.targets" Condition="'$(VSToolsPath)' != ''" />
- <Import Project="$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v10.0\VSSDK\Microsoft.VsSDK.targets" Condition="false" />
- <PropertyGroup>
- <PreBuildEvent>if not exist "$(ProjectDir)Key.snk" ("$(FrameworkSDKDir)Bin\NETFX 4.6 Tools\sn.exe" -k "$(ProjectDir)Key.snk")</PreBuildEvent>
- </PropertyGroup>
- <!-- To modify your build process, add your task inside one of the targets below and uncomment it.
- Other similar extension points exist, see Microsoft.Common.targets.
- <Target Name="BeforeBuild">
- </Target>
- <Target Name="AfterBuild">
- </Target>
- -->
-</Project>
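A side note on the strong-naming setup in the project file above: the PreBuildEvent generates Key.snk on first build, and SignAssembly/AssemblyOriginatorKeyFile then sign the output with it. A minimal standalone C# sketch to confirm a built assembly really is strong-named (the DLL path here is an assumption, not taken from the project):

    using System;
    using System.Reflection;

    static class StrongNameCheck
    {
        static void Main()
        {
            // Path is hypothetical; point it at the actual build output.
            Assembly asm = Assembly.LoadFrom(@"bin\Release\ClangFormat.dll");
            byte[] token = asm.GetName().GetPublicKeyToken();
            // A strong-named assembly carries a non-empty public key token.
            Console.WriteLine(token != null && token.Length > 0
                ? BitConverter.ToString(token)
                : "assembly is not strong-named");
        }
    }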
diff --git a/clang/tools/clang-format-vs/ClangFormat/ClangFormat.vsct b/clang/tools/clang-format-vs/ClangFormat/ClangFormat.vsct
deleted file mode 100644
index 7989577..0000000
--- a/clang/tools/clang-format-vs/ClangFormat/ClangFormat.vsct
+++ /dev/null
@@ -1,127 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<CommandTable xmlns="http://schemas.microsoft.com/VisualStudio/2005-10-18/CommandTable" xmlns:xs="http://www.w3.org/2001/XMLSchema">
-
- <!-- This is the file that defines the actual layout and type of the commands.
- It is divided into different sections (e.g. command definition, command
- placement, ...), with each defining a specific set of properties.
- See the comment before each section for more details about how to
- use it. -->
-
- <!-- The VSCT compiler (the tool that translates this file into the binary
- format that Visual Studio will consume) has the ability to run a preprocessor
- on the vsct file; this preprocessor is (usually) the C++ preprocessor, so
- it is possible to define includes and macros with the same syntax used
- in C++ files. Using this ability of the compiler here, we include some files
- defining some of the constants that we will use inside the file. -->
-
- <!--This is the file that defines the IDs for all the commands exposed by Visual Studio. -->
- <Extern href="stdidcmd.h"/>
-
- <!--This header contains the command ids for the menus provided by the shell. -->
- <Extern href="vsshlids.h"/>
-
-
-
-
- <!--The Commands section is where the commands, menus and menu groups are defined.
- This section uses a Guid to identify the package that provides the command defined inside it. -->
- <Commands package="guidClangFormatPkg">
- <!-- Inside this section we have different sub-sections: one for the menus, another
- for the menu groups, one for the buttons (the actual commands), one for the combos
- and the last one for the bitmaps used. Each element is identified by a command id that
- is a unique pair of guid and numeric identifier; the guid part of the identifier is usually
- called "command set" and is used to group different command inside a logically related
- group; your package should define its own command set in order to avoid collisions
- with command ids defined by other packages. -->
-
-
- <!-- In this section you can define new menu groups. A menu group is a container for
- other menus or buttons (commands); from a visual point of view you can see the
- group as the part of a menu contained between two lines. The parent of a group
- must be a menu. -->
- <Groups>
-
- <Group guid="guidClangFormatCmdSet" id="MyMenuGroup" priority="0x0600">
- <Parent guid="guidSHLMainMenu" id="IDM_VS_MENU_TOOLS"/>
- </Group>
-
-
-
- </Groups>
-
- <!--Buttons section. -->
- <!--This section defines the elements the user can interact with, like a menu command or a button
- or combo box in a toolbar. -->
- <Buttons>
- <!--To define a button you have to specify its ID, the parent group and its display priority.
- The command is visible and enabled by default. If you need to change the visibility, status, etc, you can use
- the CommandFlag node.
- You can add more than one CommandFlag node e.g.:
- <CommandFlag>DefaultInvisible</CommandFlag>
- <CommandFlag>DynamicVisibility</CommandFlag>
- If you do not want an image next to your command, remove the Icon node. -->
-
- <Button guid="guidClangFormatCmdSet" id="cmdidClangFormatSelection" priority="0x0100" type="Button">
- <Parent guid="guidClangFormatCmdSet" id="MyMenuGroup" />
- <Icon guid="guidImages" id="bmpPic1" />
- <Strings>
- <ButtonText>Clang Format Selection</ButtonText>
- </Strings>
- </Button>
-
- <Button guid="guidClangFormatCmdSet" id="cmdidClangFormatDocument" priority="0x0101" type="Button">
- <Parent guid="guidClangFormatCmdSet" id="MyMenuGroup" />
- <Icon guid="guidImages" id="bmpPic2" />
- <Strings>
- <ButtonText>Clang Format Document</ButtonText>
- </Strings>
- </Button>
-
- </Buttons>
-
- <!--The bitmaps section is used to define the bitmaps that are used for the commands.-->
- <Bitmaps>
- <!-- The bitmap id is defined in a way that is a little bit different from the others:
- the declaration starts with a guid for the bitmap strip, then there is the resource id of the
- bitmap strip containing the bitmaps and then there are the numeric ids of the elements used
- inside a button definition. An important aspect of this declaration is that the element id
- must be the actual index (1-based) of the bitmap inside the bitmap strip. -->
- <Bitmap guid="guidImages" href="Resources\Images_32bit.bmp" usedList="bmpPic1, bmpPic2, bmpPicSearch, bmpPicX, bmpPicArrows"/>
-
- </Bitmaps>
-
- </Commands>
-
-
- <KeyBindings>
- <KeyBinding guid="guidClangFormatCmdSet" id="cmdidClangFormatSelection" editor="guidTextEditor" key1="R" mod1="Control" key2="F" mod2="Control"/>
- <KeyBinding guid="guidClangFormatCmdSet" id="cmdidClangFormatDocument" editor="guidTextEditor" key1="R" mod1="Control" key2="D" mod2="Control"/>
- </KeyBindings>
-
-
-
- <Symbols>
- <!-- This is the package guid. -->
- <GuidSymbol name="guidClangFormatPkg" value="{c5286038-25d3-4f65-83a8-51fa2df4a146}" />
-
- <!-- This is the guid used to group the menu commands together -->
- <GuidSymbol name="guidClangFormatCmdSet" value="{e39cbab1-0f96-4022-a2bc-da5a9db7eb78}">
-
- <IDSymbol name="MyMenuGroup" value="0x1020" />
- <IDSymbol name="cmdidClangFormatSelection" value="0x0100" />
- <IDSymbol name="cmdidClangFormatDocument" value="0x0101" />
- </GuidSymbol>
-
- <GuidSymbol name="guidTextEditor" value="{8B382828-6202-11d1-8870-0000F87579D2}" />
-
-
- <GuidSymbol name="guidImages" value="{6d53937b-9ae1-42e1-8849-d876dcdbad7b}" >
- <IDSymbol name="bmpPic1" value="1" />
- <IDSymbol name="bmpPic2" value="2" />
- <IDSymbol name="bmpPicSearch" value="3" />
- <IDSymbol name="bmpPicX" value="4" />
- <IDSymbol name="bmpPicArrows" value="5" />
- </GuidSymbol>
- </Symbols>
-
-</CommandTable>
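The package implementation that follows drives clang-format as a child process: the document is piped to stdin and the tool answers on stdout with -output-replacements-xml. A standalone C# sketch of that protocol, simplified relative to the RunClangFormat method below (no -offset/-length range, and it assumes a clang-format binary on PATH):

    using System;
    using System.Diagnostics;
    using System.Xml.Linq;

    static class ClangFormatProtocol
    {
        static void Main()
        {
            var psi = new ProcessStartInfo("clang-format",
                "-style=LLVM -output-replacements-xml")
            {
                UseShellExecute = false,
                RedirectStandardInput = true,
                RedirectStandardOutput = true,
            };
            using (var p = Process.Start(psi))
            {
                // clang-format reads all of stdin before writing anything.
                p.StandardInput.Write("int  main( ){return 0;}\n");
                p.StandardInput.Close(); // signals end of input
                string xml = p.StandardOutput.ReadToEnd();
                p.WaitForExit();
                // Each <replacement offset=".." length=".."> holds the new text
                // for a UTF-8 byte range of the original input.
                foreach (var r in XDocument.Parse(xml).Descendants("replacement"))
                    Console.WriteLine("@{0}+{1}: '{2}'",
                        r.Attribute("offset").Value,
                        r.Attribute("length").Value,
                        r.Value);
            }
        }
    }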
diff --git a/clang/tools/clang-format-vs/ClangFormat/ClangFormatPackage.cs b/clang/tools/clang-format-vs/ClangFormat/ClangFormatPackage.cs
deleted file mode 100644
index 26a0af3..0000000
--- a/clang/tools/clang-format-vs/ClangFormat/ClangFormatPackage.cs
+++ /dev/null
@@ -1,464 +0,0 @@
-//===-- ClangFormatPackage.cs - VSPackage for clang-format -------*- C# -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This class contains a VS extension package that runs clang-format over a
-// selection in a VS text editor.
-//
-//===----------------------------------------------------------------------===//
-
-using EnvDTE;
-using Microsoft.VisualStudio.Shell;
-using Microsoft.VisualStudio.Shell.Interop;
-using Microsoft.VisualStudio.Text;
-using Microsoft.VisualStudio.Text.Editor;
-using System;
-using System.Collections;
-using System.ComponentModel;
-using System.ComponentModel.Design;
-using System.IO;
-using System.Runtime.InteropServices;
-using System.Xml.Linq;
-using System.Linq;
-using System.Text;
-
-namespace LLVM.ClangFormat
-{
-    [ClassInterface(ClassInterfaceType.AutoDual)]
-    [CLSCompliant(false), ComVisible(true)]
-    public class OptionPageGrid : DialogPage
-    {
-        private string assumeFilename = "";
-        private string fallbackStyle = "LLVM";
-        private bool sortIncludes = false;
-        private string style = "file";
-        private bool formatOnSave = false;
-        private string formatOnSaveFileExtensions =
-            ".c;.cpp;.cxx;.cc;.tli;.tlh;.h;.hh;.hpp;.hxx;.hh;.inl;" +
-            ".java;.js;.ts;.m;.mm;.proto;.protodevel;.td";
-
-        public OptionPageGrid Clone()
-        {
-            // Use MemberwiseClone to copy value types.
-            var clone = (OptionPageGrid)MemberwiseClone();
-            return clone;
-        }
-
-        public class StyleConverter : TypeConverter
-        {
-            protected ArrayList values;
-            public StyleConverter()
-            {
-                // Initializes the standard values list with defaults.
-                values = new ArrayList(new string[] { "file", "Chromium", "Google", "LLVM", "Mozilla", "WebKit" });
-            }
-
-            public override bool GetStandardValuesSupported(ITypeDescriptorContext context)
-            {
-                return true;
-            }
-
-            public override StandardValuesCollection GetStandardValues(ITypeDescriptorContext context)
-            {
-                return new StandardValuesCollection(values);
-            }
-
-            public override bool CanConvertFrom(ITypeDescriptorContext context, Type sourceType)
-            {
-                if (sourceType == typeof(string))
-                    return true;
-
-                return base.CanConvertFrom(context, sourceType);
-            }
-
-            public override object ConvertFrom(ITypeDescriptorContext context, System.Globalization.CultureInfo culture, object value)
-            {
-                string s = value as string;
-                if (s == null)
-                    return base.ConvertFrom(context, culture, value);
-
-                return value;
-            }
-        }
-
-        [Category("Format Options")]
-        [DisplayName("Style")]
-        [Description("Coding style, currently supports:\n" +
-                     "  - Predefined styles ('LLVM', 'Google', 'Chromium', 'Mozilla', 'WebKit').\n" +
-                     "  - 'file' to search for a YAML .clang-format or _clang-format\n" +
-                     "    configuration file.\n" +
-                     "  - A YAML configuration snippet.\n\n" +
-                     "'File':\n" +
-                     "  Searches for a .clang-format or _clang-format configuration file\n" +
-                     "  in the source file's directory and its parents.\n\n" +
-                     "YAML configuration snippet:\n" +
-                     "  The content of a .clang-format configuration file, as string.\n" +
-                     "  Example: '{BasedOnStyle: \"LLVM\", IndentWidth: 8}'\n\n" +
-                     "See also: http://clang.llvm.org/docs/ClangFormatStyleOptions.html.")]
-        [TypeConverter(typeof(StyleConverter))]
-        public string Style
-        {
-            get { return style; }
-            set { style = value; }
-        }
-
-        public sealed class FilenameConverter : TypeConverter
-        {
-            public override bool CanConvertFrom(ITypeDescriptorContext context, Type sourceType)
-            {
-                if (sourceType == typeof(string))
-                    return true;
-
-                return base.CanConvertFrom(context, sourceType);
-            }
-
-            public override object ConvertFrom(ITypeDescriptorContext context, System.Globalization.CultureInfo culture, object value)
-            {
-                string s = value as string;
-                if (s == null)
-                    return base.ConvertFrom(context, culture, value);
-
-                // Check if string contains quotes. On Windows, file names cannot contain quotes.
-                // We do not accept them however to avoid hard-to-debug problems.
-                // A quote in user input would end the parameter quote and so break the command invocation.
-                if (s.IndexOf('\"') != -1)
-                    throw new NotSupportedException("Filename cannot contain quotes");
-
-                return value;
-            }
-        }
-
-        [Category("Format Options")]
-        [DisplayName("Assume Filename")]
-        [Description("When reading from stdin, clang-format assumes this " +
-                     "filename to look for a style config file (with 'file' style) " +
-                     "and to determine the language.")]
-        [TypeConverter(typeof(FilenameConverter))]
-        public string AssumeFilename
-        {
-            get { return assumeFilename; }
-            set { assumeFilename = value; }
-        }
-
-        public sealed class FallbackStyleConverter : StyleConverter
-        {
-            public FallbackStyleConverter()
-            {
-                // Add "none" to the list of styles.
-                values.Insert(0, "none");
-            }
-        }
-
-        [Category("Format Options")]
-        [DisplayName("Fallback Style")]
-        [Description("The name of the predefined style used as a fallback in case clang-format " +
-                     "is invoked with 'file' style, but can not find the configuration file.\n" +
-                     "Use 'none' fallback style to skip formatting.")]
-        [TypeConverter(typeof(FallbackStyleConverter))]
-        public string FallbackStyle
-        {
-            get { return fallbackStyle; }
-            set { fallbackStyle = value; }
-        }
-
-        [Category("Format Options")]
-        [DisplayName("Sort includes")]
-        [Description("Sort touched include lines.\n\n" +
-                     "See also: http://clang.llvm.org/docs/ClangFormat.html.")]
-        public bool SortIncludes
-        {
-            get { return sortIncludes; }
-            set { sortIncludes = value; }
-        }
-
-        [Category("Format On Save")]
-        [DisplayName("Enable")]
-        [Description("Enable running clang-format when modified files are saved. " +
-                     "Will only format if Style is found (ignores Fallback Style)."
-        )]
-        public bool FormatOnSave
-        {
-            get { return formatOnSave; }
-            set { formatOnSave = value; }
-        }
-
-        [Category("Format On Save")]
-        [DisplayName("File extensions")]
-        [Description("When formatting on save, clang-format will be applied only to " +
-                     "files with these extensions.")]
-        public string FormatOnSaveFileExtensions
-        {
-            get { return formatOnSaveFileExtensions; }
-            set { formatOnSaveFileExtensions = value; }
-        }
-    }
-
-    [PackageRegistration(UseManagedResourcesOnly = true)]
-    [InstalledProductRegistration("#110", "#112", "1.0", IconResourceID = 400)]
-    [ProvideMenuResource("Menus.ctmenu", 1)]
-    [ProvideAutoLoad(UIContextGuids80.SolutionExists)] // Load package on solution load
-    [Guid(GuidList.guidClangFormatPkgString)]
-    [ProvideOptionPage(typeof(OptionPageGrid), "LLVM/Clang", "ClangFormat", 0, 0, true)]
-    public sealed class ClangFormatPackage : Package
-    {
-        #region Package Members
-
-        RunningDocTableEventsDispatcher _runningDocTableEventsDispatcher;
-
-        protected override void Initialize()
-        {
-            base.Initialize();
-
-            _runningDocTableEventsDispatcher = new RunningDocTableEventsDispatcher(this);
-            _runningDocTableEventsDispatcher.BeforeSave += OnBeforeSave;
-
-            var commandService = GetService(typeof(IMenuCommandService)) as OleMenuCommandService;
-            if (commandService != null)
-            {
-                {
-                    var menuCommandID = new CommandID(GuidList.guidClangFormatCmdSet, (int)PkgCmdIDList.cmdidClangFormatSelection);
-                    var menuItem = new MenuCommand(MenuItemCallback, menuCommandID);
-                    commandService.AddCommand(menuItem);
-                }
-
-                {
-                    var menuCommandID = new CommandID(GuidList.guidClangFormatCmdSet, (int)PkgCmdIDList.cmdidClangFormatDocument);
-                    var menuItem = new MenuCommand(MenuItemCallback, menuCommandID);
-                    commandService.AddCommand(menuItem);
-                }
-            }
-        }
-        #endregion
-
-        OptionPageGrid GetUserOptions()
-        {
-            return (OptionPageGrid)GetDialogPage(typeof(OptionPageGrid));
-        }
-
-        private void MenuItemCallback(object sender, EventArgs args)
-        {
-            var mc = sender as System.ComponentModel.Design.MenuCommand;
-            if (mc == null)
-                return;
-
-            switch (mc.CommandID.ID)
-            {
-                case (int)PkgCmdIDList.cmdidClangFormatSelection:
-                    FormatSelection(GetUserOptions());
-                    break;
-
-                case (int)PkgCmdIDList.cmdidClangFormatDocument:
-                    FormatDocument(GetUserOptions());
-                    break;
-            }
-        }
-
-        private static bool FileHasExtension(string filePath, string fileExtensions)
-        {
-            var extensions = fileExtensions.ToLower().Split(new char[] { ';' }, StringSplitOptions.RemoveEmptyEntries);
-            return extensions.Contains(Path.GetExtension(filePath).ToLower());
-        }
-
-        private void OnBeforeSave(object sender, Document document)
-        {
-            var options = GetUserOptions();
-
-            if (!options.FormatOnSave)
-                return;
-
-            if (!FileHasExtension(document.FullName, options.FormatOnSaveFileExtensions))
-                return;
-
-            if (!Vsix.IsDocumentDirty(document))
-                return;
-
-            var optionsWithNoFallbackStyle = GetUserOptions().Clone();
-            optionsWithNoFallbackStyle.FallbackStyle = "none";
-            FormatDocument(document, optionsWithNoFallbackStyle);
-        }
-
-        /// <summary>
-        /// Runs clang-format on the current selection
-        /// </summary>
-        private void FormatSelection(OptionPageGrid options)
-        {
-            IWpfTextView view = Vsix.GetCurrentView();
-            if (view == null)
-                // We're not in a text view.
-                return;
-            string text = view.TextBuffer.CurrentSnapshot.GetText();
-            int start = view.Selection.Start.Position.GetContainingLine().Start.Position;
-            int end = view.Selection.End.Position.GetContainingLine().End.Position;
-
-            // clang-format doesn't support formatting a range that starts at the end
-            // of the file.
-            if (start >= text.Length && text.Length > 0)
-                start = text.Length - 1;
-            string path = Vsix.GetDocumentParent(view);
-            string filePath = Vsix.GetDocumentPath(view);
-
-            RunClangFormatAndApplyReplacements(text, start, end, path, filePath, options, view);
-        }
-
-        /// <summary>
-        /// Runs clang-format on the current document
-        /// </summary>
-        private void FormatDocument(OptionPageGrid options)
-        {
-            FormatView(Vsix.GetCurrentView(), options);
-        }
-
-        private void FormatDocument(Document document, OptionPageGrid options)
-        {
-            FormatView(Vsix.GetDocumentView(document), options);
-        }
-
-        private void FormatView(IWpfTextView view, OptionPageGrid options)
-        {
-            if (view == null)
-                // We're not in a text view.
-                return;
-
-            string filePath = Vsix.GetDocumentPath(view);
-            var path = Path.GetDirectoryName(filePath);
-
-            string text = view.TextBuffer.CurrentSnapshot.GetText();
-            if (!text.EndsWith(Environment.NewLine))
-            {
-                view.TextBuffer.Insert(view.TextBuffer.CurrentSnapshot.Length, Environment.NewLine);
-                text += Environment.NewLine;
-            }
-
-            RunClangFormatAndApplyReplacements(text, 0, text.Length, path, filePath, options, view);
-        }
-
-        private void RunClangFormatAndApplyReplacements(string text, int start, int end, string path, string filePath, OptionPageGrid options, IWpfTextView view)
-        {
-            try
-            {
-                string replacements = RunClangFormat(text, start, end, path, filePath, options);
-                ApplyClangFormatReplacements(replacements, view);
-            }
-            catch (Exception e)
-            {
-                var uiShell = (IVsUIShell)GetService(typeof(SVsUIShell));
-                var id = Guid.Empty;
-                int result;
-                uiShell.ShowMessageBox(
-                    0, ref id,
-                    "Error while running clang-format:",
-                    e.Message,
-                    string.Empty, 0,
-                    OLEMSGBUTTON.OLEMSGBUTTON_OK,
-                    OLEMSGDEFBUTTON.OLEMSGDEFBUTTON_FIRST,
-                    OLEMSGICON.OLEMSGICON_INFO,
-                    0, out result);
-            }
-        }
-
-        /// <summary>
-        /// Runs the given text through clang-format and returns the replacements as XML.
-        ///
-        /// Formats the text in range start and end.
-        /// </summary>
-        private static string RunClangFormat(string text, int start, int end, string path, string filePath, OptionPageGrid options)
-        {
-            string vsixPath = Path.GetDirectoryName(
-                typeof(ClangFormatPackage).Assembly.Location);
-
-            System.Diagnostics.Process process = new System.Diagnostics.Process();
-            process.StartInfo.UseShellExecute = false;
-            process.StartInfo.FileName = vsixPath + "\\clang-format.exe";
-            char[] chars = text.ToCharArray();
-            int offset = Encoding.UTF8.GetByteCount(chars, 0, start);
-            int length = Encoding.UTF8.GetByteCount(chars, 0, end) - offset;
-            // Poor man's escaping - this will not work when quotes are already escaped
-            // in the input (but we don't need more).
-            string style = options.Style.Replace("\"", "\\\"");
-            string fallbackStyle = options.FallbackStyle.Replace("\"", "\\\"");
-            process.StartInfo.Arguments = " -offset " + offset +
-                                          " -length " + length +
-                                          " -output-replacements-xml " +
-                                          " -style \"" + style + "\"" +
-                                          " -fallback-style \"" + fallbackStyle + "\"";
-            if (options.SortIncludes)
-                process.StartInfo.Arguments += " -sort-includes ";
-            string assumeFilename = options.AssumeFilename;
-            if (string.IsNullOrEmpty(assumeFilename))
-                assumeFilename = filePath;
-            if (!string.IsNullOrEmpty(assumeFilename))
-                process.StartInfo.Arguments += " -assume-filename \"" + assumeFilename + "\"";
-            process.StartInfo.CreateNoWindow = true;
-            process.StartInfo.RedirectStandardInput = true;
-            process.StartInfo.RedirectStandardOutput = true;
-            process.StartInfo.RedirectStandardError = true;
-            if (path != null)
-                process.StartInfo.WorkingDirectory = path;
-            // We have to be careful when communicating via standard input / output,
-            // as writes to the buffers will block until they are read from the other side.
-            // Thus, we:
-            // 1. Start the process - clang-format.exe will start to read the input from the
-            //    standard input.
-            try
-            {
-                process.Start();
-            }
-            catch (Exception e)
-            {
-                throw new Exception(
-                    "Cannot execute " + process.StartInfo.FileName + ".\n\"" +
-                    e.Message + "\".\nPlease make sure it is on the PATH.");
-            }
-            // 2. We write everything to the standard input - this cannot block, as clang-format
-            //    reads the full standard input before analyzing it without writing anything to the
-            //    standard output.
-            StreamWriter utf8Writer = new StreamWriter(process.StandardInput.BaseStream, new UTF8Encoding(false));
-            utf8Writer.Write(text);
-            // 3. We notify clang-format that the input is done - after this point clang-format
-            //    will start analyzing the input and eventually write the output.
-            utf8Writer.Close();
-            // 4. We must read clang-format's output before waiting for it to exit; clang-format
-            //    will close the channel by exiting.
-            string output = process.StandardOutput.ReadToEnd();
-            // 5. clang-format is done, wait until it is fully shut down.
-            process.WaitForExit();
-            if (process.ExitCode != 0)
-            {
-                // FIXME: If clang-format writes enough to the standard error stream to block,
-                // we will never reach this point; instead, read the standard error asynchronously.
-                throw new Exception(process.StandardError.ReadToEnd());
-            }
-            return output;
-        }
-
-        /// <summary>
-        /// Applies the clang-format replacements (xml) to the current view
-        /// </summary>
-        private static void ApplyClangFormatReplacements(string replacements, IWpfTextView view)
-        {
-            // clang-format returns no replacements if input text is empty
-            if (replacements.Length == 0)
-                return;
-
-            string text = view.TextBuffer.CurrentSnapshot.GetText();
-            byte[] bytes = Encoding.UTF8.GetBytes(text);
-
-            var root = XElement.Parse(replacements);
-            var edit = view.TextBuffer.CreateEdit();
-            foreach (XElement replacement in root.Descendants("replacement"))
-            {
-                int offset = int.Parse(replacement.Attribute("offset").Value);
-                int length = int.Parse(replacement.Attribute("length").Value);
-                var span = new Span(
-                    Encoding.UTF8.GetCharCount(bytes, 0, offset),
-                    Encoding.UTF8.GetCharCount(bytes, offset, length));
-                edit.Replace(span, replacement.Value);
-            }
-            edit.Apply();
-        }
-    }
-}
diff --git a/clang/tools/clang-format-vs/ClangFormat/GlobalSuppressions.cs b/clang/tools/clang-format-vs/ClangFormat/GlobalSuppressions.cs
deleted file mode 100644
index 175a74e..0000000
--- a/clang/tools/clang-format-vs/ClangFormat/GlobalSuppressions.cs
+++ /dev/null
@@ -1,11 +0,0 @@
-// This file is used by Code Analysis to maintain SuppressMessage
-// attributes that are applied to this project. Project-level
-// suppressions either have no target or are given a specific target
-// and scoped to a namespace, type, member, etc.
-//
-// To add a suppression to this file, right-click the message in the
-// Error List, point to "Suppress Message(s)", and click "In Project
-// Suppression File". You do not need to add suppressions to this
-// file manually.
-
-[assembly: System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1017:MarkAssembliesWithComVisible")]
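For illustration, a scoped project-level suppression of the kind this file collects would look like the following (the rule ID and target are hypothetical examples, not entries from the deleted file):

    // Hypothetical scoped suppression; "Suppress Message(s)" generates this shape.
    [assembly: System.Diagnostics.CodeAnalysis.SuppressMessage(
        "Microsoft.Design",
        "CA1031:DoNotCatchGeneralExceptionTypes",   // rule ID chosen for illustration
        Scope = "member",
        Target = "LLVM.ClangFormat.ClangFormatPackage.#Initialize()",
        Justification = "Errors are reported to the user via a message box.")]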
diff --git a/clang/tools/clang-format-vs/ClangFormat/Guids.cs b/clang/tools/clang-format-vs/ClangFormat/Guids.cs
deleted file mode 100644
index ed1c12d..0000000
--- a/clang/tools/clang-format-vs/ClangFormat/Guids.cs
+++ /dev/null
@@ -1,12 +0,0 @@
-using System;
-
-namespace LLVM.ClangFormat
-{
- static class GuidList
- {
- public const string guidClangFormatPkgString = "c5286038-25d3-4f65-83a8-51fa2df4a146";
- public const string guidClangFormatCmdSetString = "e39cbab1-0f96-4022-a2bc-da5a9db7eb78";
-
- public static readonly Guid guidClangFormatCmdSet = new Guid(guidClangFormatCmdSetString);
- };
-}
diff --git a/clang/tools/clang-format-vs/ClangFormat/PkgCmdID.cs b/clang/tools/clang-format-vs/ClangFormat/PkgCmdID.cs
deleted file mode 100644
index c274d1ca..0000000
--- a/clang/tools/clang-format-vs/ClangFormat/PkgCmdID.cs
+++ /dev/null
@@ -1,8 +0,0 @@
-namespace LLVM.ClangFormat
-{
- static class PkgCmdIDList
- {
- public const uint cmdidClangFormatSelection = 0x100;
- public const uint cmdidClangFormatDocument = 0x101;
- };
-}
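These numeric IDs only become commands when paired with the command-set GUID from Guids.cs; a small sketch of the pairing, mirroring the CommandID construction in ClangFormatPackage.Initialize (the values must also match ClangFormat.vsct):

    using System;
    using System.ComponentModel.Design;

    static class ClangFormatCommandIds
    {
        // Same value as GuidList.guidClangFormatCmdSetString.
        static readonly Guid CmdSet = new Guid("e39cbab1-0f96-4022-a2bc-da5a9db7eb78");

        static CommandID FormatSelection()
        {
            // The package casts the uint constant to int when building the pair.
            return new CommandID(CmdSet, 0x100 /* cmdidClangFormatSelection */);
        }
    }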
diff --git a/clang/tools/clang-format-vs/ClangFormat/Properties/AssemblyInfo.cs b/clang/tools/clang-format-vs/ClangFormat/Properties/AssemblyInfo.cs
deleted file mode 100644
index b1cef49..0000000
--- a/clang/tools/clang-format-vs/ClangFormat/Properties/AssemblyInfo.cs
+++ /dev/null
@@ -1,33 +0,0 @@
-using System;
-using System.Reflection;
-using System.Resources;
-using System.Runtime.CompilerServices;
-using System.Runtime.InteropServices;
-
-// General Information about an assembly is controlled through the following
-// set of attributes. Change these attribute values to modify the information
-// associated with an assembly.
-[assembly: AssemblyTitle("ClangFormat")]
-[assembly: AssemblyDescription("")]
-[assembly: AssemblyConfiguration("")]
-[assembly: AssemblyCompany("LLVM")]
-[assembly: AssemblyProduct("ClangFormat")]
-[assembly: AssemblyCopyright("")]
-[assembly: AssemblyTrademark("")]
-[assembly: AssemblyCulture("")]
-[assembly: ComVisible(false)]
-[assembly: CLSCompliant(false)]
-[assembly: NeutralResourcesLanguage("en-US")]
-
-// Version information for an assembly consists of the following four values:
-//
-// Major Version
-// Minor Version
-// Build Number
-// Revision
-//
-// You can specify all the values or you can default the Revision and Build Numbers
-// by using the '*' as shown below:
-
-[assembly: AssemblyVersion("1.1.0.0")]
-[assembly: AssemblyFileVersion("1.1.0.0")]
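The '*' wildcard mentioned in the comment above would be written as follows (illustrative only; this project pins all four components instead):

    // Build and Revision are then generated at compile time, e.g. 1.1.5678.12345.
    [assembly: System.Reflection.AssemblyVersion("1.1.*")]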
diff --git a/clang/tools/clang-format-vs/ClangFormat/Resources.Designer.cs b/clang/tools/clang-format-vs/ClangFormat/Resources.Designer.cs
deleted file mode 100644
index e3129b3..0000000
--- a/clang/tools/clang-format-vs/ClangFormat/Resources.Designer.cs
+++ /dev/null
@@ -1,63 +0,0 @@
-//------------------------------------------------------------------------------
-// <auto-generated>
-// This code was generated by a tool.
-// Runtime Version:4.0.30319.42000
-//
-// Changes to this file may cause incorrect behavior and will be lost if
-// the code is regenerated.
-// </auto-generated>
-//------------------------------------------------------------------------------
-
-namespace LLVM.ClangFormat {
- using System;
-
-
- /// <summary>
- /// A strongly-typed resource class, for looking up localized strings, etc.
- /// </summary>
- // This class was auto-generated by the StronglyTypedResourceBuilder
- // class via a tool like ResGen or Visual Studio.
- // To add or remove a member, edit your .ResX file then rerun ResGen
- // with the /str option, or rebuild your VS project.
- [global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")]
- [global::System.Diagnostics.DebuggerNonUserCodeAttribute()]
- [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()]
- internal class Resources {
-
- private static global::System.Resources.ResourceManager resourceMan;
-
- private static global::System.Globalization.CultureInfo resourceCulture;
-
- [global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")]
- internal Resources() {
- }
-
- /// <summary>
- /// Returns the cached ResourceManager instance used by this class.
- /// </summary>
- [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)]
- internal static global::System.Resources.ResourceManager ResourceManager {
- get {
- if (object.ReferenceEquals(resourceMan, null)) {
- global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("LLVM.ClangFormat.Resources", typeof(Resources).Assembly);
- resourceMan = temp;
- }
- return resourceMan;
- }
- }
-
- /// <summary>
- /// Overrides the current thread's CurrentUICulture property for all
- /// resource lookups using this strongly typed resource class.
- /// </summary>
- [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)]
- internal static global::System.Globalization.CultureInfo Culture {
- get {
- return resourceCulture;
- }
- set {
- resourceCulture = value;
- }
- }
- }
-}
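What the generated accessor does internally amounts to one cached ResourceManager lookup; a self-contained equivalent sketch (the resource name is hypothetical, since this resx defines no string entries):

    using System;
    using System.Globalization;
    using System.Resources;

    static class ResourceLookup
    {
        static void Main()
        {
            // Base name = default namespace + resx file name, as in the
            // generated Resources class above.
            var rm = new ResourceManager("LLVM.ClangFormat.Resources",
                                         typeof(ResourceLookup).Assembly);
            // Passing an explicit culture is the per-call equivalent of
            // setting Resources.Culture.
            Console.WriteLine(rm.GetString("SomeName", // hypothetical entry
                                           CultureInfo.GetCultureInfo("en-US")));
        }
    }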
diff --git a/clang/tools/clang-format-vs/ClangFormat/Resources.resx b/clang/tools/clang-format-vs/ClangFormat/Resources.resx
deleted file mode 100644
index 352987a..0000000
--- a/clang/tools/clang-format-vs/ClangFormat/Resources.resx
+++ /dev/null
@@ -1,129 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!--
- VS SDK Notes: This resx file contains the resources that will be consumed directly by your package.
- For example, if you chose to create a tool window, there is a resource with ID 'CanNotCreateWindow'. This
- is used in VsPkg.cs to determine the string to show the user if there is an error when attempting to create
- the tool window.
-
- Resources that are accessed directly from your package *by Visual Studio* are stored in the VSPackage.resx
- file.
--->
-<root>
- <!--
- Microsoft ResX Schema
-
- Version 2.0
-
- The primary goal of this format is to allow a simple XML format
- that is mostly human readable. The generation and parsing of the
- various data types are done through the TypeConverter classes
- associated with the data types.
-
- Example:
-
- ... ado.net/XML headers & schema ...
- <resheader name="resmimetype">text/microsoft-resx</resheader>
- <resheader name="version">2.0</resheader>
- <resheader name="reader">System.Resources.ResXResourceReader, System.Windows.Forms, ...</resheader>
- <resheader name="writer">System.Resources.ResXResourceWriter, System.Windows.Forms, ...</resheader>
- <data name="Name1"><value>this is my long string</value><comment>this is a comment</comment></data>
- <data name="Color1" type="System.Drawing.Color, System.Drawing">Blue</data>
- <data name="Bitmap1" mimetype="application/x-microsoft.net.object.binary.base64">
- <value>[base64 mime encoded serialized .NET Framework object]</value>
- </data>
- <data name="Icon1" type="System.Drawing.Icon, System.Drawing" mimetype="application/x-microsoft.net.object.bytearray.base64">
- <value>[base64 mime encoded string representing a byte array form of the .NET Framework object]</value>
- <comment>This is a comment</comment>
- </data>
-
- There are any number of "resheader" rows that contain simple
- name/value pairs.
-
- Each data row contains a name and a value. The row also contains a
- type or mimetype. Type corresponds to a .NET class that supports
- text/value conversion through the TypeConverter architecture.
- Classes that don't support this are serialized and stored with the
- mimetype set.
-
- The mimetype is used for serialized objects, and tells the
- ResXResourceReader how to depersist the object. This is currently not
- extensible. For a given mimetype the value must be set accordingly:
-
- Note - application/x-microsoft.net.object.binary.base64 is the format
- that the ResXResourceWriter will generate; however, the reader can
- read any of the formats listed below.
-
- mimetype: application/x-microsoft.net.object.binary.base64
- value : The object must be serialized with
- : System.Runtime.Serialization.Formatters.Binary.BinaryFormatter
- : and then encoded with base64 encoding.
-
- mimetype: application/x-microsoft.net.object.soap.base64
- value : The object must be serialized with
- : System.Runtime.Serialization.Formatters.Soap.SoapFormatter
- : and then encoded with base64 encoding.
-
- mimetype: application/x-microsoft.net.object.bytearray.base64
- value : The object must be serialized into a byte array
- : using a System.ComponentModel.TypeConverter
- : and then encoded with base64 encoding.
- -->
- <xsd:schema id="root" xmlns="" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:msdata="urn:schemas-microsoft-com:xml-msdata">
- <xsd:import namespace="http://www.w3.org/XML/1998/namespace" />
- <xsd:element name="root" msdata:IsDataSet="true">
- <xsd:complexType>
- <xsd:choice maxOccurs="unbounded">
- <xsd:element name="metadata">
- <xsd:complexType>
- <xsd:sequence>
- <xsd:element name="value" type="xsd:string" minOccurs="0" />
- </xsd:sequence>
- <xsd:attribute name="name" use="required" type="xsd:string" />
- <xsd:attribute name="type" type="xsd:string" />
- <xsd:attribute name="mimetype" type="xsd:string" />
- <xsd:attribute ref="xml:space" />
- </xsd:complexType>
- </xsd:element>
- <xsd:element name="assembly">
- <xsd:complexType>
- <xsd:attribute name="alias" type="xsd:string" />
- <xsd:attribute name="name" type="xsd:string" />
- </xsd:complexType>
- </xsd:element>
- <xsd:element name="data">
- <xsd:complexType>
- <xsd:sequence>
- <xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
- <xsd:element name="comment" type="xsd:string" minOccurs="0" msdata:Ordinal="2" />
- </xsd:sequence>
- <xsd:attribute name="name" type="xsd:string" use="required" msdata:Ordinal="1" />
- <xsd:attribute name="type" type="xsd:string" msdata:Ordinal="3" />
- <xsd:attribute name="mimetype" type="xsd:string" msdata:Ordinal="4" />
- <xsd:attribute ref="xml:space" />
- </xsd:complexType>
- </xsd:element>
- <xsd:element name="resheader">
- <xsd:complexType>
- <xsd:sequence>
- <xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
- </xsd:sequence>
- <xsd:attribute name="name" type="xsd:string" use="required" />
- </xsd:complexType>
- </xsd:element>
- </xsd:choice>
- </xsd:complexType>
- </xsd:element>
- </xsd:schema>
- <resheader name="resmimetype">
- <value>text/microsoft-resx</value>
- </resheader>
- <resheader name="version">
- <value>2.0</value>
- </resheader>
- <resheader name="reader">
- <value>System.Resources.ResXResourceReader, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
- </resheader>
- <resheader name="writer">
- <value>System.Resources.ResXResourceWriter, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
- </resheader>
-</root>
\ No newline at end of file
diff --git a/clang/tools/clang-format-vs/ClangFormat/Resources/Images_32bit.bmp b/clang/tools/clang-format-vs/ClangFormat/Resources/Images_32bit.bmp
deleted file mode 100644
index 2fa7ab00..0000000
--- a/clang/tools/clang-format-vs/ClangFormat/Resources/Images_32bit.bmp
+++ /dev/null
Binary files differ
diff --git a/clang/tools/clang-format-vs/ClangFormat/Resources/Package.ico b/clang/tools/clang-format-vs/ClangFormat/Resources/Package.ico
deleted file mode 100644
index ea3b23f..0000000
--- a/clang/tools/clang-format-vs/ClangFormat/Resources/Package.ico
+++ /dev/null
Binary files differ
diff --git a/clang/tools/clang-format-vs/ClangFormat/RunningDocTableEventsDispatcher.cs b/clang/tools/clang-format-vs/ClangFormat/RunningDocTableEventsDispatcher.cs
deleted file mode 100644
index 163f68d..0000000
--- a/clang/tools/clang-format-vs/ClangFormat/RunningDocTableEventsDispatcher.cs
+++ /dev/null
@@ -1,79 +0,0 @@
-using EnvDTE;
-using Microsoft.VisualStudio;
-using Microsoft.VisualStudio.Shell;
-using Microsoft.VisualStudio.Shell.Interop;
-using System.Linq;
-
-namespace LLVM.ClangFormat
-{
-    // Exposes event sources for IVsRunningDocTableEvents3 events.
-    internal sealed class RunningDocTableEventsDispatcher : IVsRunningDocTableEvents3
-    {
-        private RunningDocumentTable _runningDocumentTable;
-        private DTE _dte;
-
-        public delegate void OnBeforeSaveHandler(object sender, Document document);
-        public event OnBeforeSaveHandler BeforeSave;
-
-        public RunningDocTableEventsDispatcher(Package package)
-        {
-            _runningDocumentTable = new RunningDocumentTable(package);
-            _runningDocumentTable.Advise(this);
-            _dte = (DTE)Package.GetGlobalService(typeof(DTE));
-        }
-
-        public int OnAfterAttributeChange(uint docCookie, uint grfAttribs)
-        {
-            return VSConstants.S_OK;
-        }
-
-        public int OnAfterAttributeChangeEx(uint docCookie, uint grfAttribs, IVsHierarchy pHierOld, uint itemidOld, string pszMkDocumentOld, IVsHierarchy pHierNew, uint itemidNew, string pszMkDocumentNew)
-        {
-            return VSConstants.S_OK;
-        }
-
-        public int OnAfterDocumentWindowHide(uint docCookie, IVsWindowFrame pFrame)
-        {
-            return VSConstants.S_OK;
-        }
-
-        public int OnAfterFirstDocumentLock(uint docCookie, uint dwRDTLockType, uint dwReadLocksRemaining, uint dwEditLocksRemaining)
-        {
-            return VSConstants.S_OK;
-        }
-
-        public int OnAfterSave(uint docCookie)
-        {
-            return VSConstants.S_OK;
-        }
-
-        public int OnBeforeDocumentWindowShow(uint docCookie, int fFirstShow, IVsWindowFrame pFrame)
-        {
-            return VSConstants.S_OK;
-        }
-
-        public int OnBeforeLastDocumentUnlock(uint docCookie, uint dwRDTLockType, uint dwReadLocksRemaining, uint dwEditLocksRemaining)
-        {
-            return VSConstants.S_OK;
-        }
-
-        public int OnBeforeSave(uint docCookie)
-        {
-            if (BeforeSave != null)
-            {
-                var document = FindDocumentByCookie(docCookie);
-                if (document != null) // Not sure why this happens sometimes
-                {
-                    BeforeSave(this, FindDocumentByCookie(docCookie));
-                }
-            }
-            return VSConstants.S_OK;
-        }
-
-        private Document FindDocumentByCookie(uint docCookie)
-        {
-            var documentInfo = _runningDocumentTable.GetDocumentInfo(docCookie);
-            return _dte.Documents.Cast<Document>().FirstOrDefault(doc => doc.FullName == documentInfo.Moniker);
-        }
-    }
-}
diff --git a/clang/tools/clang-format-vs/ClangFormat/VSPackage.resx b/clang/tools/clang-format-vs/ClangFormat/VSPackage.resx
deleted file mode 100644
index 81102d3..0000000
--- a/clang/tools/clang-format-vs/ClangFormat/VSPackage.resx
+++ /dev/null
@@ -1,140 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!--
- VS SDK Notes: This resx file contains the resources that will be consumed from your package by Visual Studio.
- For example, Visual Studio will attempt to load resource '400' from this resource stream when it needs to
- load your package's icon. Because Visual Studio will always look in the VSPackage.resources stream first for
- resources it needs, you should put additional resources that Visual Studio will load directly into this resx
- file.
-
- Resources that you would like to access directly from your package in a strong-typed fashion should be stored
- in Resources.resx or another resx file.
--->
-<root>
- <!--
- Microsoft ResX Schema
-
- Version 2.0
-
- The primary goal of this format is to allow a simple XML format
- that is mostly human readable. The generation and parsing of the
- various data types are done through the TypeConverter classes
- associated with the data types.
-
- Example:
-
- ... ado.net/XML headers & schema ...
- <resheader name="resmimetype">text/microsoft-resx</resheader>
- <resheader name="version">2.0</resheader>
- <resheader name="reader">System.Resources.ResXResourceReader, System.Windows.Forms, ...</resheader>
- <resheader name="writer">System.Resources.ResXResourceWriter, System.Windows.Forms, ...</resheader>
- <data name="Name1"><value>this is my long string</value><comment>this is a comment</comment></data>
- <data name="Color1" type="System.Drawing.Color, System.Drawing">Blue</data>
- <data name="Bitmap1" mimetype="application/x-microsoft.net.object.binary.base64">
- <value>[base64 mime encoded serialized .NET Framework object]</value>
- </data>
- <data name="Icon1" type="System.Drawing.Icon, System.Drawing" mimetype="application/x-microsoft.net.object.bytearray.base64">
- <value>[base64 mime encoded string representing a byte array form of the .NET Framework object]</value>
- <comment>This is a comment</comment>
- </data>
-
- There are any number of "resheader" rows that contain simple
- name/value pairs.
-
- Each data row contains a name and a value. The row also contains a
- type or mimetype. Type corresponds to a .NET class that supports
- text/value conversion through the TypeConverter architecture.
- Classes that don't support this are serialized and stored with the
- mimetype set.
-
- The mimetype is used for serialized objects, and tells the
- ResXResourceReader how to depersist the object. This is currently not
- extensible. For a given mimetype the value must be set accordingly:
-
- Note - application/x-microsoft.net.object.binary.base64 is the format
- that the ResXResourceWriter will generate; however, the reader can
- read any of the formats listed below.
-
- mimetype: application/x-microsoft.net.object.binary.base64
- value : The object must be serialized with
- : System.Runtime.Serialization.Formatters.Binary.BinaryFormatter
- : and then encoded with base64 encoding.
-
- mimetype: application/x-microsoft.net.object.soap.base64
- value : The object must be serialized with
- : System.Runtime.Serialization.Formatters.Soap.SoapFormatter
- : and then encoded with base64 encoding.
-
- mimetype: application/x-microsoft.net.object.bytearray.base64
- value : The object must be serialized into a byte array
- : using a System.ComponentModel.TypeConverter
- : and then encoded with base64 encoding.
- -->
- <xsd:schema id="root" xmlns="" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:msdata="urn:schemas-microsoft-com:xml-msdata">
- <xsd:import namespace="http://www.w3.org/XML/1998/namespace" />
- <xsd:element name="root" msdata:IsDataSet="true">
- <xsd:complexType>
- <xsd:choice maxOccurs="unbounded">
- <xsd:element name="metadata">
- <xsd:complexType>
- <xsd:sequence>
- <xsd:element name="value" type="xsd:string" minOccurs="0" />
- </xsd:sequence>
- <xsd:attribute name="name" use="required" type="xsd:string" />
- <xsd:attribute name="type" type="xsd:string" />
- <xsd:attribute name="mimetype" type="xsd:string" />
- <xsd:attribute ref="xml:space" />
- </xsd:complexType>
- </xsd:element>
- <xsd:element name="assembly">
- <xsd:complexType>
- <xsd:attribute name="alias" type="xsd:string" />
- <xsd:attribute name="name" type="xsd:string" />
- </xsd:complexType>
- </xsd:element>
- <xsd:element name="data">
- <xsd:complexType>
- <xsd:sequence>
- <xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
- <xsd:element name="comment" type="xsd:string" minOccurs="0" msdata:Ordinal="2" />
- </xsd:sequence>
- <xsd:attribute name="name" type="xsd:string" use="required" msdata:Ordinal="1" />
- <xsd:attribute name="type" type="xsd:string" msdata:Ordinal="3" />
- <xsd:attribute name="mimetype" type="xsd:string" msdata:Ordinal="4" />
- <xsd:attribute ref="xml:space" />
- </xsd:complexType>
- </xsd:element>
- <xsd:element name="resheader">
- <xsd:complexType>
- <xsd:sequence>
- <xsd:element name="value" type="xsd:string" minOccurs="0" msdata:Ordinal="1" />
- </xsd:sequence>
- <xsd:attribute name="name" type="xsd:string" use="required" />
- </xsd:complexType>
- </xsd:element>
- </xsd:choice>
- </xsd:complexType>
- </xsd:element>
- </xsd:schema>
- <resheader name="resmimetype">
- <value>text/microsoft-resx</value>
- </resheader>
- <resheader name="version">
- <value>2.0</value>
- </resheader>
- <resheader name="reader">
- <value>System.Resources.ResXResourceReader, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
- </resheader>
- <resheader name="writer">
- <value>System.Resources.ResXResourceWriter, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089</value>
- </resheader>
- <assembly alias="System.Windows.Forms" name="System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089" />
- <data name="110" xml:space="preserve">
- <value>ClangFormat</value>
- </data>
- <data name="112" xml:space="preserve">
- <value>Formats code by calling the clang-format executable.</value>
- </data>
- <data name="400" type="System.Resources.ResXFileRef, System.Windows.Forms">
- <value>Resources\Package.ico;System.Drawing.Icon, System.Drawing, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a</value>
- </data>
-</root>
\ No newline at end of file
diff --git a/clang/tools/clang-format-vs/ClangFormat/Vsix.cs b/clang/tools/clang-format-vs/ClangFormat/Vsix.cs
deleted file mode 100644
index 0d86cb5..0000000
--- a/clang/tools/clang-format-vs/ClangFormat/Vsix.cs
+++ /dev/null
@@ -1,96 +0,0 @@
-using EnvDTE;
-using Microsoft.VisualStudio.Editor;
-using Microsoft.VisualStudio.Shell;
-using Microsoft.VisualStudio.Shell.Interop;
-using Microsoft.VisualStudio.Text;
-using Microsoft.VisualStudio.Text.Editor;
-using Microsoft.VisualStudio.TextManager.Interop;
-using System;
-using System.IO;
-
-namespace LLVM.ClangFormat
-{
-    internal sealed class Vsix
-    {
-        /// <summary>
-        /// Returns the currently active view if it is an IWpfTextView.
-        /// </summary>
-        public static IWpfTextView GetCurrentView()
-        {
-            // The SVsTextManager is a service through which we can get the active view.
-            var textManager = (IVsTextManager)Package.GetGlobalService(typeof(SVsTextManager));
-            IVsTextView textView;
-            textManager.GetActiveView(1, null, out textView);
-
-            // Now we have the active view as IVsTextView, but the text interfaces we need
-            // are in the IWpfTextView.
-            return VsToWpfTextView(textView);
-        }
-
-        public static bool IsDocumentDirty(Document document)
-        {
-            var textView = GetDocumentView(document);
-            var textDocument = GetTextDocument(textView);
-            return textDocument?.IsDirty == true;
-        }
-
-        public static IWpfTextView GetDocumentView(Document document)
-        {
-            var textView = GetVsTextViewFromPath(document.FullName);
-            return VsToWpfTextView(textView);
-        }
-
-        public static IWpfTextView VsToWpfTextView(IVsTextView textView)
-        {
-            var userData = (IVsUserData)textView;
-            if (userData == null)
-                return null;
-            Guid guidWpfViewHost = DefGuidList.guidIWpfTextViewHost;
-            object host;
-            userData.GetData(ref guidWpfViewHost, out host);
-            return ((IWpfTextViewHost)host).TextView;
-        }
-
-        public static IVsTextView GetVsTextViewFromPath(string filePath)
-        {
-            // From http://stackoverflow.com/a/2427368/4039972
-            var dte2 = (EnvDTE80.DTE2)Package.GetGlobalService(typeof(SDTE));
-            var sp = (Microsoft.VisualStudio.OLE.Interop.IServiceProvider)dte2;
-            var serviceProvider = new Microsoft.VisualStudio.Shell.ServiceProvider(sp);
-
-            IVsUIHierarchy uiHierarchy;
-            uint itemID;
-            IVsWindowFrame windowFrame;
-            if (VsShellUtilities.IsDocumentOpen(serviceProvider, filePath, Guid.Empty,
-                                                out uiHierarchy, out itemID, out windowFrame))
-            {
-                // Get the IVsTextView from the windowFrame.
-                return VsShellUtilities.GetTextView(windowFrame);
-            }
-            return null;
-        }
-
-        public static ITextDocument GetTextDocument(IWpfTextView view)
-        {
-            ITextDocument document;
-            if (view != null && view.TextBuffer.Properties.TryGetProperty(typeof(ITextDocument), out document))
-                return document;
-            return null;
-        }
-
-        public static string GetDocumentParent(IWpfTextView view)
-        {
-            ITextDocument document = GetTextDocument(view);
-            if (document != null)
-            {
-                return Directory.GetParent(document.FilePath).ToString();
-            }
-            return null;
-        }
-
-        public static string GetDocumentPath(IWpfTextView view)
-        {
-            return GetTextDocument(view)?.FilePath;
-        }
-    }
-}
diff --git a/clang/tools/clang-format-vs/ClangFormat/license.txt b/clang/tools/clang-format-vs/ClangFormat/license.txt
deleted file mode 100644
index 63c17f1..0000000
--- a/clang/tools/clang-format-vs/ClangFormat/license.txt
+++ /dev/null
@@ -1,261 +0,0 @@
-==============================================================================
-The LLVM Project is under the Apache License v2.0 with LLVM Exceptions:
-==============================================================================
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability.
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - ----- LLVM Exceptions to the Apache 2.0 License ---- - -As an exception, if, as a result of your compiling your source code, portions -of this Software are embedded into an Object form of such source code, you -may redistribute such embedded portions in such Object form without complying -with the conditions of Sections 4(a), 4(b) and 4(d) of the License. - -In addition, if you combine or link compiled forms of this Software with -software that is licensed under the GPLv2 ("Combined Software") and if a -court of competent jurisdiction determines that the patent provision (Section -3), the indemnity provision (Section 9) or other Section of the License -conflicts with the conditions of the GPLv2, you may retroactively and -prospectively choose to deem waived or otherwise exclude such Section(s) of -the License, but only in their entirety and only with respect to the Combined -Software. 
- -============================================================================== -Software from third parties included in the LLVM Project: -============================================================================== -The LLVM Project contains third party software which is under different license -terms. All such code will be identified clearly using at least one of two -mechanisms: -1) It will be in a separate directory tree with its own `LICENSE.txt` or - `LICENSE` file at the top containing the specific license and restrictions - which apply to that software, or -2) It will contain specific license and restriction terms at the top of every - file. - -============================================================================== -Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy): -============================================================================== -University of Illinois/NCSA -Open Source License - -Copyright (c) 2007-2018 University of Illinois at Urbana-Champaign. -All rights reserved. - -Developed by: - - LLVM Team - - University of Illinois at Urbana-Champaign - - http://llvm.org - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal with the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - - * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimers. - - * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimers in the documentation and/or other materials provided with the distribution. - - * Neither the names of the LLVM Team, University of Illinois at Urbana-Champaign, nor the names of its contributors may be used to endorse or promote products derived from this Software without specific prior written permission. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. 
diff --git a/clang/tools/clang-format-vs/ClangFormat/packages.config b/clang/tools/clang-format-vs/ClangFormat/packages.config deleted file mode 100644 index 07dc281..0000000 --- a/clang/tools/clang-format-vs/ClangFormat/packages.config +++ /dev/null @@ -1,21 +0,0 @@ -<?xml version="1.0" encoding="utf-8"?> -<packages> - <package id="VSSDK.CoreUtility" version="10.0.4" targetFramework="net40" /> - <package id="VSSDK.CoreUtility.10" version="10.0.4" targetFramework="net40" /> - <package id="VSSDK.Editor" version="10.0.4" targetFramework="net40" /> - <package id="VSSDK.Editor.10" version="10.0.4" targetFramework="net40" /> - <package id="VSSDK.IDE" version="7.0.4" targetFramework="net40" /> - <package id="VSSDK.IDE.10" version="10.0.4" targetFramework="net40" /> - <package id="VSSDK.IDE.8" version="8.0.4" targetFramework="net40" /> - <package id="VSSDK.IDE.9" version="9.0.3" targetFramework="net40" /> - <package id="VSSDK.OLE.Interop" version="7.0.4" targetFramework="net40" /> - <package id="VSSDK.Shell.10" version="10.0.3" targetFramework="net40" /> - <package id="VSSDK.Shell.Immutable.10" version="10.0.3" targetFramework="net40" /> - <package id="VSSDK.Shell.Interop" version="7.0.4" targetFramework="net40" /> - <package id="VSSDK.Shell.Interop.8" version="8.0.3" targetFramework="net40" /> - <package id="VSSDK.Shell.Interop.9" version="9.0.3" targetFramework="net40" /> - <package id="VSSDK.Text" version="10.0.4" targetFramework="net40" /> - <package id="VSSDK.Text.10" version="10.0.4" targetFramework="net40" /> - <package id="VSSDK.TextManager.Interop" version="7.0.4" targetFramework="net40" /> - <package id="VSSDK.TextManager.Interop.8" version="8.0.4" targetFramework="net40" /> -</packages>
\ No newline at end of file diff --git a/clang/tools/clang-format-vs/README.txt b/clang/tools/clang-format-vs/README.txt deleted file mode 100644 index 2cac5b9..0000000 --- a/clang/tools/clang-format-vs/README.txt +++ /dev/null @@ -1,51 +0,0 @@ -This directory contains a VSPackage project to generate a Visual Studio extension
-for clang-format.
-
-Build prerequisites are:
-- Visual Studio 2015
-- Extensions SDK (you'll be prompted to install it if you open ClangFormat.sln)
-
-The extension is built using CMake to generate the usual LLVM.sln by setting
-the following CMake vars:
-
-- BUILD_CLANG_FORMAT_VS_PLUGIN=ON
-
-- NUGET_EXE_DIR=path/to/nuget_dir (unless nuget.exe is already available in PATH)
-
-example:
- cd /d C:\code\llvm
- mkdir build & cd build
- cmake -DBUILD_CLANG_FORMAT_VS_PLUGIN=ON -DNUGET_EXE_DIR=C:\nuget ..
-
-Once LLVM.sln is generated, build the clang_format_vsix target, which will build
-ClangFormat.sln, the C# extension application.
-
-The CMake build will copy clang-format.exe and LICENSE.TXT into the ClangFormat/
-directory so they can be bundled with the plug-in, and will also create
-ClangFormat/source.extension.vsixmanifest. Once the plug-in has been built with
-CMake once, it can be built manually from the ClangFormat.sln solution in Visual
-Studio.
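
For reference, rebuilding just that target from the build directory can also be
done with a generator-agnostic command (illustrative; building it from inside
the LLVM.sln IDE works equally well):

  cmake --build . --target clang_format_vsix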
-
-===========
- Debugging
-===========
-
-Once you've built the clang_format_vsix project from LLVM.sln at least once,
-open ClangFormat.sln in Visual Studio, then:
-
-- Make sure the "Debug" target is selected
-- Open the ClangFormat project properties
-- Select the Debug tab
-- Set "Start external program:" to where your devenv.exe is installed. Typically
- it's "C:\Program Files (x86)\Microsoft Visual Studio 14.0\Common7\IDE\devenv.exe"
-- Set "Command line arguments" to: /rootsuffix Exp
-- You can now set breakpoints if you like
-- Press F5 to build and run under the debugger
-
-If all goes well, a new instance of Visual Studio will be launched in a special
-mode where it uses the experimental hive instead of the normal configuration hive.
-By default, when you build a VSIX project in Visual Studio, it auto-registers the
-extension in the experimental hive, allowing you to test it. In the new Visual Studio
-instance, open or create a C++ solution, and you should now see the Clang Format
-entries in the Tools menu. You can test it out, and any breakpoints you set will be
-hit, letting you debug as usual.
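
For reference, the Debug settings above amount to launching the experimental
instance by hand (path as in the typical install location given above; adjust
for your machine):

  "C:\Program Files (x86)\Microsoft Visual Studio 14.0\Common7\IDE\devenv.exe" /rootsuffix Exp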
diff --git a/clang/tools/clang-format-vs/source.extension.vsixmanifest.in b/clang/tools/clang-format-vs/source.extension.vsixmanifest.in deleted file mode 100644 index d4820c0..0000000 --- a/clang/tools/clang-format-vs/source.extension.vsixmanifest.in +++ /dev/null @@ -1,19 +0,0 @@ -<?xml version="1.0" encoding="utf-8"?>
-<PackageManifest Version="2.0.0" xmlns="http://schemas.microsoft.com/developer/vsx-schema/2011" xmlns:d="http://schemas.microsoft.com/developer/vsx-schema-design/2011">
- <Metadata>
- <Identity Id="3cb18a5e-97e9-11e7-abc4-cec278b6b50a" Version="@CLANG_FORMAT_VS_VERSION@" Language="en-US" Publisher="LLVM"/>
- <DisplayName>ClangFormat</DisplayName>
- <Description xml:space="preserve">A tool to format C/C++/Obj-C code.</Description>
- <MoreInfo>http://clang.llvm.org/docs/ClangFormat.html</MoreInfo>
- <License>license.txt</License>
- </Metadata>
- <Installation InstalledByMsi="false">
- <InstallationTarget Id="Microsoft.VisualStudio.Pro" Version="[11.0, 17.0)" />
- </Installation>
- <Dependencies>
- <Dependency Id="Microsoft.VisualStudio.MPF" MinVersion="11.0" DisplayName="Visual Studio MPF" />
- </Dependencies>
- <Prerequisites>
- <Prerequisite Id="Microsoft.VisualStudio.Component.CoreEditor" Version="[11.0,)" DisplayName="Visual Studio core editor" />
- </Prerequisites>
-</PackageManifest>
diff --git a/clang/tools/libclang/CXStoredDiagnostic.cpp b/clang/tools/libclang/CXStoredDiagnostic.cpp index c4c2487..0301822 100644 --- a/clang/tools/libclang/CXStoredDiagnostic.cpp +++ b/clang/tools/libclang/CXStoredDiagnostic.cpp @@ -33,14 +33,14 @@ CXDiagnosticSeverity CXStoredDiagnostic::getSeverity() const { case DiagnosticsEngine::Error: return CXDiagnostic_Error; case DiagnosticsEngine::Fatal: return CXDiagnostic_Fatal; } - + llvm_unreachable("Invalid diagnostic level"); } CXSourceLocation CXStoredDiagnostic::getLocation() const { if (Diag.getLocation().isInvalid()) return clang_getNullLocation(); - + return translateSourceLocation(Diag.getLocation().getManager(), LangOpts, Diag.getLocation()); } @@ -57,7 +57,7 @@ CXString CXStoredDiagnostic::getDiagnosticOption(CXString *Disable) const { *Disable = cxstring::createDup((Twine("-Wno-") + Option).str()); return cxstring::createDup((Twine("-W") + Option).str()); } - + if (ID == diag::fatal_too_many_errors) { if (Disable) *Disable = cxstring::createRef("-ferror-limit=0"); @@ -79,7 +79,7 @@ CXString CXStoredDiagnostic::getCategoryText() const { unsigned CXStoredDiagnostic::getNumRanges() const { if (Diag.getLocation().isInvalid()) return 0; - + return Diag.range_size(); } @@ -92,12 +92,12 @@ CXSourceRange CXStoredDiagnostic::getRange(unsigned int Range) const { unsigned CXStoredDiagnostic::getNumFixIts() const { if (Diag.getLocation().isInvalid()) - return 0; + return 0; return Diag.fixit_size(); } CXString CXStoredDiagnostic::getFixIt(unsigned FixIt, - CXSourceRange *ReplacementRange) const { + CXSourceRange *ReplacementRange) const { const FixItHint &Hint = Diag.fixit_begin()[FixIt]; if (ReplacementRange) { // Create a range that covers the entire replacement (or @@ -108,4 +108,3 @@ CXString CXStoredDiagnostic::getFixIt(unsigned FixIt, } return cxstring::createDup(Hint.CodeToInsert); } - diff --git a/clang/utils/TableGen/ClangASTNodesEmitter.cpp b/clang/utils/TableGen/ClangASTNodesEmitter.cpp index 07ddafc..512af83 100644 --- a/clang/utils/TableGen/ClangASTNodesEmitter.cpp +++ b/clang/utils/TableGen/ClangASTNodesEmitter.cpp @@ -34,7 +34,7 @@ class ClangASTNodesEmitter { typedef ChildMap::const_iterator ChildIterator; std::set<ASTNode> PrioritizedClasses; - RecordKeeper &Records; + const RecordKeeper &Records; ASTNode Root; const std::string &NodeClassName; const std::string &BaseSuffix; @@ -70,14 +70,12 @@ class ClangASTNodesEmitter { std::pair<ASTNode, ASTNode> EmitNode(raw_ostream& OS, ASTNode Base); public: - explicit ClangASTNodesEmitter(RecordKeeper &R, const std::string &N, + explicit ClangASTNodesEmitter(const RecordKeeper &R, const std::string &N, const std::string &S, std::string_view PriorizeIfSubclassOf) : Records(R), NodeClassName(N), BaseSuffix(S) { - auto vecPrioritized = - PriorizeIfSubclassOf.empty() - ? 
std::vector<Record *>{} - : R.getAllDerivedDefinitions(PriorizeIfSubclassOf); + ArrayRef<const Record *> vecPrioritized = + R.getAllDerivedDefinitionsIfDefined(PriorizeIfSubclassOf); PrioritizedClasses = std::set<ASTNode>(vecPrioritized.begin(), vecPrioritized.end()); } @@ -169,10 +167,7 @@ void ClangASTNodesEmitter::deriveChildTree() { assert(!Root && "already computed tree"); // Emit statements - const std::vector<Record*> Stmts - = Records.getAllDerivedDefinitions(NodeClassName); - - for (auto *R : Stmts) { + for (const Record *R : Records.getAllDerivedDefinitions(NodeClassName)) { if (auto B = R->getValueAsOptionalDef(BaseFieldName)) Tree.insert(std::make_pair(B, R)); else if (Root) @@ -217,14 +212,14 @@ void ClangASTNodesEmitter::run(raw_ostream &OS) { OS << "#undef ABSTRACT_" << macroHierarchyName() << "\n"; } -void clang::EmitClangASTNodes(RecordKeeper &RK, raw_ostream &OS, +void clang::EmitClangASTNodes(const RecordKeeper &RK, raw_ostream &OS, const std::string &N, const std::string &S, std::string_view PriorizeIfSubclassOf) { ClangASTNodesEmitter(RK, N, S, PriorizeIfSubclassOf).run(OS); } -void printDeclContext(const std::multimap<Record *, Record *> &Tree, - Record *DeclContext, raw_ostream &OS) { +void printDeclContext(const std::multimap<const Record *, const Record *> &Tree, + const Record *DeclContext, raw_ostream &OS) { if (!DeclContext->getValueAsBit(AbstractFieldName)) OS << "DECL_CONTEXT(" << DeclContext->getName() << ")\n"; auto i = Tree.lower_bound(DeclContext); @@ -236,7 +231,7 @@ void printDeclContext(const std::multimap<Record *, Record *> &Tree, // Emits and addendum to a .inc file to enumerate the clang declaration // contexts. -void clang::EmitClangDeclContext(RecordKeeper &Records, raw_ostream &OS) { +void clang::EmitClangDeclContext(const RecordKeeper &Records, raw_ostream &OS) { // FIXME: Find a .td file format to allow for this to be represented better. 
emitSourceFileHeader("List of AST Decl nodes", OS, Records); @@ -245,22 +240,15 @@ void clang::EmitClangDeclContext(RecordKeeper &Records, raw_ostream &OS) { OS << "# define DECL_CONTEXT(DECL)\n"; OS << "#endif\n"; - std::vector<Record *> DeclContextsVector = - Records.getAllDerivedDefinitions(DeclContextNodeClassName); - std::vector<Record *> Decls = - Records.getAllDerivedDefinitions(DeclNodeClassName); - - std::multimap<Record *, Record *> Tree; - - const std::vector<Record *> Stmts = - Records.getAllDerivedDefinitions(DeclNodeClassName); + std::multimap<const Record *, const Record *> Tree; - for (auto *R : Stmts) { + for (const Record *R : Records.getAllDerivedDefinitions(DeclNodeClassName)) { if (auto *B = R->getValueAsOptionalDef(BaseFieldName)) Tree.insert(std::make_pair(B, R)); } - for (auto *DeclContext : DeclContextsVector) { + for (const Record *DeclContext : + Records.getAllDerivedDefinitions(DeclContextNodeClassName)) { printDeclContext(Tree, DeclContext, OS); } diff --git a/clang/utils/TableGen/ClangAttrEmitter.cpp b/clang/utils/TableGen/ClangAttrEmitter.cpp index d24215d..9b2249a 100644 --- a/clang/utils/TableGen/ClangAttrEmitter.cpp +++ b/clang/utils/TableGen/ClangAttrEmitter.cpp @@ -189,13 +189,12 @@ static StringRef NormalizeGNUAttrSpelling(StringRef AttrSpelling) { typedef std::vector<std::pair<std::string, const Record *>> ParsedAttrMap; -static ParsedAttrMap getParsedAttrList(RecordKeeper &Records, +static ParsedAttrMap getParsedAttrList(const RecordKeeper &Records, ParsedAttrMap *Dupes = nullptr, bool SemaOnly = true) { - std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr"); std::set<std::string> Seen; ParsedAttrMap R; - for (const auto *Attr : Attrs) { + for (const Record *Attr : Records.getAllDerivedDefinitions("Attr")) { if (!SemaOnly || Attr->getValueAsBit("SemaHandler")) { std::string AN; if (Attr->isSubClassOf("TargetSpecificAttr") && @@ -1911,12 +1910,10 @@ static LateAttrParseKind getLateAttrParseKind(const Record *Attr) { } // Emits the LateParsed property for attributes. 
-static void emitClangAttrLateParsedListImpl(RecordKeeper &Records, +static void emitClangAttrLateParsedListImpl(const RecordKeeper &Records, raw_ostream &OS, LateAttrParseKind LateParseMode) { - std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr"); - - for (const auto *Attr : Attrs) { + for (const auto *Attr : Records.getAllDerivedDefinitions("Attr")) { if (LateAttrParseKind LateParsed = getLateAttrParseKind(Attr); LateParsed != LateParseMode) continue; @@ -1932,14 +1929,14 @@ static void emitClangAttrLateParsedListImpl(RecordKeeper &Records, } } -static void emitClangAttrLateParsedList(RecordKeeper &Records, +static void emitClangAttrLateParsedList(const RecordKeeper &Records, raw_ostream &OS) { OS << "#if defined(CLANG_ATTR_LATE_PARSED_LIST)\n"; emitClangAttrLateParsedListImpl(Records, OS, LateAttrParseKind::Standard); OS << "#endif // CLANG_ATTR_LATE_PARSED_LIST\n\n"; } -static void emitClangAttrLateParsedExperimentalList(RecordKeeper &Records, +static void emitClangAttrLateParsedExperimentalList(const RecordKeeper &Records, raw_ostream &OS) { OS << "#if defined(CLANG_ATTR_LATE_PARSED_EXPERIMENTAL_EXT_LIST)\n"; emitClangAttrLateParsedListImpl(Records, OS, @@ -2066,7 +2063,7 @@ struct PragmaClangAttributeSupport { }; llvm::DenseMap<const Record *, RuleOrAggregateRuleSet> SubjectsToRules; - PragmaClangAttributeSupport(RecordKeeper &Records); + PragmaClangAttributeSupport(const RecordKeeper &Records); bool isAttributedSupported(const Record &Attribute); @@ -2105,9 +2102,7 @@ static bool doesDeclDeriveFrom(const Record *D, const Record *Base) { } PragmaClangAttributeSupport::PragmaClangAttributeSupport( - RecordKeeper &Records) { - std::vector<Record *> MetaSubjects = - Records.getAllDerivedDefinitions("AttrSubjectMatcherRule"); + const RecordKeeper &Records) { auto MapFromSubjectsToRules = [this](const Record *SubjectContainer, const Record *MetaSubject, const Record *Constraint) { @@ -2127,7 +2122,8 @@ PragmaClangAttributeSupport::PragmaClangAttributeSupport( } } }; - for (const auto *MetaSubject : MetaSubjects) { + for (const auto *MetaSubject : + Records.getAllDerivedDefinitions("AttrSubjectMatcherRule")) { MapFromSubjectsToRules(MetaSubject, MetaSubject, /*Constraints=*/nullptr); std::vector<Record *> Constraints = MetaSubject->getValueAsListOfDefs("Constraints"); @@ -2135,11 +2131,10 @@ PragmaClangAttributeSupport::PragmaClangAttributeSupport( MapFromSubjectsToRules(Constraint, MetaSubject, Constraint); } - std::vector<Record *> Aggregates = - Records.getAllDerivedDefinitions("AttrSubjectMatcherAggregateRule"); - std::vector<Record *> DeclNodes = - Records.getAllDerivedDefinitions(DeclNodeClassName); - for (const auto *Aggregate : Aggregates) { + ArrayRef<const Record *> DeclNodes = + Records.getAllDerivedDefinitions(DeclNodeClassName); + for (const auto *Aggregate : + Records.getAllDerivedDefinitions("AttrSubjectMatcherAggregateRule")) { Record *SubjectDecl = Aggregate->getValueAsDef("Subject"); // Gather sub-classes of the aggregate subject that act as attribute @@ -2169,7 +2164,7 @@ PragmaClangAttributeSupport::PragmaClangAttributeSupport( } static PragmaClangAttributeSupport & -getPragmaAttributeSupport(RecordKeeper &Records) { +getPragmaAttributeSupport(const RecordKeeper &Records) { static PragmaClangAttributeSupport Instance(Records); return Instance; } @@ -2403,9 +2398,8 @@ std::map<std::string, std::vector<const Record *>> NameToAttrsMap; /// Build a map from the attribute name to the Attrs that use that name. 
If more /// than one Attr use a name, the arguments could be different so a more complex /// check is needed in the generated switch. -void generateNameToAttrsMap(RecordKeeper &Records) { - std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr"); - for (const auto *A : Attrs) { +void generateNameToAttrsMap(const RecordKeeper &Records) { + for (const auto *A : Records.getAllDerivedDefinitions("Attr")) { std::vector<FlattenedSpelling> Spellings = GetFlattenedSpellings(*A); for (const auto &S : Spellings) { auto It = NameToAttrsMap.find(S.name()); @@ -2510,12 +2504,11 @@ static bool isTypeArgument(const Record *Arg) { } /// Emits the first-argument-is-type property for attributes. -static void emitClangAttrTypeArgList(RecordKeeper &Records, raw_ostream &OS) { +static void emitClangAttrTypeArgList(const RecordKeeper &Records, + raw_ostream &OS) { OS << "#if defined(CLANG_ATTR_TYPE_ARG_LIST)\n"; std::map<std::string, FSIVecTy> FSIMap; - std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr"); - - for (const auto *Attr : Attrs) { + for (const auto *Attr : Records.getAllDerivedDefinitions("Attr")) { // Determine whether the first argument is a type. std::vector<Record *> Args = Attr->getValueAsListOfDefs("Args"); if (Args.empty()) @@ -2531,7 +2524,8 @@ static void emitClangAttrTypeArgList(RecordKeeper &Records, raw_ostream &OS) { /// Emits the parse-arguments-in-unevaluated-context property for /// attributes. -static void emitClangAttrArgContextList(RecordKeeper &Records, raw_ostream &OS) { +static void emitClangAttrArgContextList(const RecordKeeper &Records, + raw_ostream &OS) { OS << "#if defined(CLANG_ATTR_ARG_CONTEXT_LIST)\n"; std::map<std::string, FSIVecTy> FSIMap; ParsedAttrMap Attrs = getParsedAttrList(Records); @@ -2590,12 +2584,11 @@ static bool isVariadicStringLiteralArgument(const Record *Arg) { return ArgKind == "VariadicStringArgument"; } -static void emitClangAttrVariadicIdentifierArgList(RecordKeeper &Records, +static void emitClangAttrVariadicIdentifierArgList(const RecordKeeper &Records, raw_ostream &OS) { OS << "#if defined(CLANG_ATTR_VARIADIC_IDENTIFIER_ARG_LIST)\n"; - std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr"); std::map<std::string, FSIVecTy> FSIMap; - for (const auto *A : Attrs) { + for (const auto *A : Records.getAllDerivedDefinitions("Attr")) { // Determine whether the first argument is a variadic identifier. std::vector<Record *> Args = A->getValueAsListOfDefs("Args"); if (Args.empty() || !isVariadicIdentifierArgument(Args[0])) @@ -2608,8 +2601,9 @@ static void emitClangAttrVariadicIdentifierArgList(RecordKeeper &Records, // Emits the list of arguments that should be parsed as unevaluated string // literals for each attribute. -static void emitClangAttrUnevaluatedStringLiteralList(RecordKeeper &Records, - raw_ostream &OS) { +static void +emitClangAttrUnevaluatedStringLiteralList(const RecordKeeper &Records, + raw_ostream &OS) { OS << "#if defined(CLANG_ATTR_STRING_LITERAL_ARG_LIST)\n"; auto MakeMask = [](ArrayRef<Record *> Args) { @@ -2626,9 +2620,8 @@ static void emitClangAttrUnevaluatedStringLiteralList(RecordKeeper &Records, return Bits; }; - std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr"); std::map<std::string, FSIVecTy> FSIMap; - for (const auto *Attr : Attrs) { + for (const auto *Attr : Records.getAllDerivedDefinitions("Attr")) { // Determine whether there are any string arguments. 
uint32_t ArgMask = MakeMask(Attr->getValueAsListOfDefs("Args")); if (!ArgMask) @@ -2640,12 +2633,11 @@ static void emitClangAttrUnevaluatedStringLiteralList(RecordKeeper &Records, } // Emits the first-argument-is-identifier property for attributes. -static void emitClangAttrIdentifierArgList(RecordKeeper &Records, raw_ostream &OS) { +static void emitClangAttrIdentifierArgList(const RecordKeeper &Records, + raw_ostream &OS) { OS << "#if defined(CLANG_ATTR_IDENTIFIER_ARG_LIST)\n"; - std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"); - std::map<std::string, FSIVecTy> FSIMap; - for (const auto *Attr : Attrs) { + for (const auto *Attr : Records.getAllDerivedDefinitions("Attr")) { // Determine whether the first argument is an identifier. std::vector<Record *> Args = Attr->getValueAsListOfDefs("Args"); if (Args.empty() || !isIdentifierArgument(Args[0])) @@ -2657,13 +2649,11 @@ static void emitClangAttrIdentifierArgList(RecordKeeper &Records, raw_ostream &O } // Emits the list for attributes having StrictEnumParameters. -static void emitClangAttrStrictIdentifierArgList(RecordKeeper &Records, +static void emitClangAttrStrictIdentifierArgList(const RecordKeeper &Records, raw_ostream &OS) { OS << "#if defined(CLANG_ATTR_STRICT_IDENTIFIER_ARG_LIST)\n"; - std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr"); - std::map<std::string, FSIVecTy> FSIMap; - for (const auto *Attr : Attrs) { + for (const auto *Attr : Records.getAllDerivedDefinitions("Attr")) { if (!Attr->getValueAsBit("StrictEnumParameters")) continue; // Check that there is really an identifier argument. @@ -2684,12 +2674,11 @@ static bool keywordThisIsaIdentifierInArgument(const Record *Arg) { .Default(false); } -static void emitClangAttrThisIsaIdentifierArgList(RecordKeeper &Records, +static void emitClangAttrThisIsaIdentifierArgList(const RecordKeeper &Records, raw_ostream &OS) { OS << "#if defined(CLANG_ATTR_THIS_ISA_IDENTIFIER_ARG_LIST)\n"; - std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr"); std::map<std::string, FSIVecTy> FSIMap; - for (const auto *A : Attrs) { + for (const auto *A : Records.getAllDerivedDefinitions("Attr")) { // Determine whether the first argument is a variadic identifier. std::vector<Record *> Args = A->getValueAsListOfDefs("Args"); if (Args.empty() || !keywordThisIsaIdentifierInArgument(Args[0])) @@ -2700,7 +2689,7 @@ static void emitClangAttrThisIsaIdentifierArgList(RecordKeeper &Records, OS << "#endif // CLANG_ATTR_THIS_ISA_IDENTIFIER_ARG_LIST\n\n"; } -static void emitClangAttrAcceptsExprPack(RecordKeeper &Records, +static void emitClangAttrAcceptsExprPack(const RecordKeeper &Records, raw_ostream &OS) { OS << "#if defined(CLANG_ATTR_ACCEPTS_EXPR_PACK)\n"; ParsedAttrMap Attrs = getParsedAttrList(Records); @@ -2733,9 +2722,8 @@ static void emitFormInitializer(raw_ostream &OS, << " /*IsRegularKeywordAttribute*/}"; } -static void emitAttributes(RecordKeeper &Records, raw_ostream &OS, +static void emitAttributes(const RecordKeeper &Records, raw_ostream &OS, bool Header) { - std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"); ParsedAttrMap AttrMap = getParsedAttrList(Records); // Helper to print the starting character of an attribute argument. 
If there @@ -2750,7 +2738,7 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS, << " OS << \", \";\n" << "}\n"; - for (const auto *Attr : Attrs) { + for (const auto *Attr : Records.getAllDerivedDefinitions("Attr")) { const Record &R = *Attr; // FIXME: Currently, documentation is generated as-needed due to the fact @@ -3235,7 +3223,7 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS, } } // Emits the class definitions for attributes. -void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) { +void clang::EmitClangAttrClass(const RecordKeeper &Records, raw_ostream &OS) { emitSourceFileHeader("Attribute classes' definitions", OS, Records); OS << "#ifndef LLVM_CLANG_ATTR_CLASSES_INC\n"; @@ -3247,19 +3235,17 @@ void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) { } // Emits the class method definitions for attributes. -void clang::EmitClangAttrImpl(RecordKeeper &Records, raw_ostream &OS) { +void clang::EmitClangAttrImpl(const RecordKeeper &Records, raw_ostream &OS) { emitSourceFileHeader("Attribute classes' member function definitions", OS, Records); emitAttributes(Records, OS, false); - std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr"); - // Instead of relying on virtual dispatch we just create a huge dispatch // switch. This is both smaller and faster than virtual functions. auto EmitFunc = [&](const char *Method) { OS << " switch (getKind()) {\n"; - for (const auto *Attr : Attrs) { + for (const auto *Attr : Records.getAllDerivedDefinitions("Attr")) { const Record &R = *Attr; if (!R.getValueAsBit("ASTNode")) continue; @@ -3285,7 +3271,7 @@ void clang::EmitClangAttrImpl(RecordKeeper &Records, raw_ostream &OS) { } static void emitAttrList(raw_ostream &OS, StringRef Class, - const std::vector<Record*> &AttrList) { + ArrayRef<const Record *> AttrList) { for (auto Cur : AttrList) { OS << Class << "(" << Cur->getName() << ")\n"; } @@ -3333,13 +3319,13 @@ namespace { /// A class of attributes. struct AttrClass { const AttrClassDescriptor &Descriptor; - Record *TheRecord; + const Record *TheRecord; AttrClass *SuperClass = nullptr; std::vector<AttrClass*> SubClasses; - std::vector<Record*> Attrs; + std::vector<const Record *> Attrs; - AttrClass(const AttrClassDescriptor &Descriptor, Record *R) - : Descriptor(Descriptor), TheRecord(R) {} + AttrClass(const AttrClassDescriptor &Descriptor, const Record *R) + : Descriptor(Descriptor), TheRecord(R) {} void emitDefaultDefines(raw_ostream &OS) const { // Default the macro unless this is a root class (i.e. Attr). @@ -3361,7 +3347,7 @@ namespace { ::emitAttrList(OS, Descriptor.MacroName, Attrs); } - void classifyAttrOnRoot(Record *Attr) { + void classifyAttrOnRoot(const Record *Attr) { bool result = classifyAttr(Attr); assert(result && "failed to classify on root"); (void) result; } @@ -3373,7 +3359,7 @@ namespace { } private: - bool classifyAttr(Record *Attr) { + bool classifyAttr(const Record *Attr) { // Check all the subclasses. 
for (auto SubClass : SubClasses) { if (SubClass->classifyAttr(Attr)) @@ -3389,13 +3375,13 @@ namespace { return false; } - Record *getFirstAttr() const { + const Record *getFirstAttr() const { if (!SubClasses.empty()) return SubClasses.front()->getFirstAttr(); return Attrs.front(); } - Record *getLastAttr() const { + const Record *getLastAttr() const { if (!Attrs.empty()) return Attrs.back(); return SubClasses.back()->getLastAttr(); @@ -3407,7 +3393,7 @@ namespace { std::vector<std::unique_ptr<AttrClass>> Classes; public: - AttrClassHierarchy(RecordKeeper &Records) { + AttrClassHierarchy(const RecordKeeper &Records) { // Find records for all the classes. for (auto &Descriptor : AttrClassDescriptors) { Record *ClassRecord = Records.getClass(Descriptor.TableGenName); @@ -3453,7 +3439,7 @@ namespace { Class->emitAttrRange(OS); } - void classifyAttr(Record *Attr) { + void classifyAttr(const Record *Attr) { // Add the attribute to the root class. Classes[0]->classifyAttrOnRoot(Attr); } @@ -3467,7 +3453,7 @@ namespace { return nullptr; } - AttrClass *findSuperClass(Record *R) const { + AttrClass *findSuperClass(const Record *R) const { // TableGen flattens the superclass list, so we just need to walk it // in reverse. auto SuperClasses = R->getSuperClasses(); @@ -3484,7 +3470,7 @@ namespace { namespace clang { // Emits the enumeration list for attributes. -void EmitClangAttrList(RecordKeeper &Records, raw_ostream &OS) { +void EmitClangAttrList(const RecordKeeper &Records, raw_ostream &OS) { emitSourceFileHeader("List of all attributes that Clang recognizes", OS, Records); @@ -3494,9 +3480,8 @@ void EmitClangAttrList(RecordKeeper &Records, raw_ostream &OS) { Hierarchy.emitDefaultDefines(OS); emitDefaultDefine(OS, "PRAGMA_SPELLING_ATTR", nullptr); - std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr"); - std::vector<Record *> PragmaAttrs; - for (auto *Attr : Attrs) { + std::vector<const Record *> PragmaAttrs; + for (auto *Attr : Records.getAllDerivedDefinitions("Attr")) { if (!Attr->getValueAsBit("ASTNode")) continue; @@ -3525,7 +3510,8 @@ void EmitClangAttrList(RecordKeeper &Records, raw_ostream &OS) { } // Emits the enumeration list for attributes. -void EmitClangAttrSubjectMatchRuleList(RecordKeeper &Records, raw_ostream &OS) { +void EmitClangAttrSubjectMatchRuleList(const RecordKeeper &Records, + raw_ostream &OS) { emitSourceFileHeader( "List of all attribute subject matching rules that Clang recognizes", OS, Records); @@ -3537,17 +3523,16 @@ void EmitClangAttrSubjectMatchRuleList(RecordKeeper &Records, raw_ostream &OS) { } // Emits the code to read an attribute from a precompiled header. -void EmitClangAttrPCHRead(RecordKeeper &Records, raw_ostream &OS) { +void EmitClangAttrPCHRead(const RecordKeeper &Records, raw_ostream &OS) { emitSourceFileHeader("Attribute deserialization code", OS, Records); Record *InhClass = Records.getClass("InheritableAttr"); - std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"), - ArgRecords; + std::vector<Record *> ArgRecords; std::vector<std::unique_ptr<Argument>> Args; std::unique_ptr<VariadicExprArgument> DelayedArgs; OS << " switch (Kind) {\n"; - for (const auto *Attr : Attrs) { + for (const auto *Attr : Records.getAllDerivedDefinitions("Attr")) { const Record &R = *Attr; if (!R.getValueAsBit("ASTNode")) continue; @@ -3592,19 +3577,17 @@ void EmitClangAttrPCHRead(RecordKeeper &Records, raw_ostream &OS) { } // Emits the code to write an attribute to a precompiled header. 
-void EmitClangAttrPCHWrite(RecordKeeper &Records, raw_ostream &OS) { +void EmitClangAttrPCHWrite(const RecordKeeper &Records, raw_ostream &OS) { emitSourceFileHeader("Attribute serialization code", OS, Records); Record *InhClass = Records.getClass("InheritableAttr"); - std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"), Args; - OS << " switch (A->getKind()) {\n"; - for (const auto *Attr : Attrs) { + for (const auto *Attr : Records.getAllDerivedDefinitions("Attr")) { const Record &R = *Attr; if (!R.getValueAsBit("ASTNode")) continue; OS << " case attr::" << R.getName() << ": {\n"; - Args = R.getValueAsListOfDefs("Args"); + std::vector<Record *> Args = R.getValueAsListOfDefs("Args"); if (R.isSubClassOf(InhClass) || !Args.empty()) OS << " const auto *SA = cast<" << R.getName() << "Attr>(A);\n"; @@ -3784,7 +3767,7 @@ static void GenerateHasAttrSpellingStringSwitch( namespace clang { // Emits list of regular keyword attributes with info about their arguments. -void EmitClangRegularKeywordAttributeInfo(RecordKeeper &Records, +void EmitClangRegularKeywordAttributeInfo(const RecordKeeper &Records, raw_ostream &OS) { emitSourceFileHeader( "A list of regular keyword attributes generated from the attribute" @@ -3808,13 +3791,12 @@ void EmitClangRegularKeywordAttributeInfo(RecordKeeper &Records, } // Emits the list of spellings for attributes. -void EmitClangAttrHasAttrImpl(RecordKeeper &Records, raw_ostream &OS) { +void EmitClangAttrHasAttrImpl(const RecordKeeper &Records, raw_ostream &OS) { emitSourceFileHeader("Code to implement the __has_attribute logic", OS, Records); // Separate all of the attributes out into four group: generic, C++11, GNU, // and declspecs. Then generate a big switch statement for each of them. - std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr"); std::vector<std::pair<const Record *, FlattenedSpelling>> Declspec, Microsoft, GNU, Pragma, HLSLAnnotation; std::map<std::string, @@ -3823,7 +3805,7 @@ void EmitClangAttrHasAttrImpl(RecordKeeper &Records, raw_ostream &OS) { // Walk over the list of all attributes, and split them out based on the // spelling variety. - for (auto *R : Attrs) { + for (auto *R : Records.getAllDerivedDefinitions("Attr")) { std::vector<FlattenedSpelling> Spellings = GetFlattenedSpellings(*R); for (const auto &SI : Spellings) { const std::string &Variety = SI.variety(); @@ -3895,7 +3877,8 @@ void EmitClangAttrHasAttrImpl(RecordKeeper &Records, raw_ostream &OS) { OS << "}\n"; } -void EmitClangAttrSpellingListIndex(RecordKeeper &Records, raw_ostream &OS) { +void EmitClangAttrSpellingListIndex(const RecordKeeper &Records, + raw_ostream &OS) { emitSourceFileHeader("Code to translate different attribute spellings into " "internal identifiers", OS, Records); @@ -3927,16 +3910,14 @@ void EmitClangAttrSpellingListIndex(RecordKeeper &Records, raw_ostream &OS) { } // Emits code used by RecursiveASTVisitor to visit attributes -void EmitClangAttrASTVisitor(RecordKeeper &Records, raw_ostream &OS) { +void EmitClangAttrASTVisitor(const RecordKeeper &Records, raw_ostream &OS) { emitSourceFileHeader("Used by RecursiveASTVisitor to visit attributes.", OS, Records); - - std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"); - // Write method declarations for Traverse* methods. // We emit this here because we only generate methods for attributes that // are declared as ASTNodes. 
OS << "#ifdef ATTR_VISITOR_DECLS_ONLY\n\n"; + ArrayRef<const Record *> Attrs = Records.getAllDerivedDefinitions("Attr"); for (const auto *Attr : Attrs) { const Record &R = *Attr; if (!R.getValueAsBit("ASTNode")) @@ -3999,7 +3980,7 @@ void EmitClangAttrASTVisitor(RecordKeeper &Records, raw_ostream &OS) { OS << "#endif // ATTR_VISITOR_DECLS_ONLY\n"; } -void EmitClangAttrTemplateInstantiateHelper(const std::vector<Record *> &Attrs, +void EmitClangAttrTemplateInstantiateHelper(ArrayRef<const Record *> Attrs, raw_ostream &OS, bool AppliesToDecl) { @@ -4053,11 +4034,12 @@ void EmitClangAttrTemplateInstantiateHelper(const std::vector<Record *> &Attrs, } // Emits code to instantiate dependent attributes on templates. -void EmitClangAttrTemplateInstantiate(RecordKeeper &Records, raw_ostream &OS) { +void EmitClangAttrTemplateInstantiate(const RecordKeeper &Records, + raw_ostream &OS) { emitSourceFileHeader("Template instantiation code for attributes", OS, Records); - std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"); + ArrayRef<const Record *> Attrs = Records.getAllDerivedDefinitions("Attr"); OS << "namespace clang {\n" << "namespace sema {\n\n" @@ -4076,7 +4058,7 @@ void EmitClangAttrTemplateInstantiate(RecordKeeper &Records, raw_ostream &OS) { } // Emits the list of parsed attributes. -void EmitClangAttrParsedAttrList(RecordKeeper &Records, raw_ostream &OS) { +void EmitClangAttrParsedAttrList(const RecordKeeper &Records, raw_ostream &OS) { emitSourceFileHeader("List of all attributes that Clang recognizes", OS, Records); @@ -4344,16 +4326,10 @@ static void GenerateAppertainsTo(const Record &Attr, raw_ostream &OS) { // written into OS and the checks for merging declaration attributes are // written into MergeOS. static void GenerateMutualExclusionsChecks(const Record &Attr, - RecordKeeper &Records, + const RecordKeeper &Records, raw_ostream &OS, raw_ostream &MergeDeclOS, raw_ostream &MergeStmtOS) { - // Find all of the definitions that inherit from MutualExclusions and include - // the given attribute in the list of exclusions to generate the - // diagMutualExclusion() check. - std::vector<Record *> ExclusionsList = - Records.getAllDerivedDefinitions("MutualExclusions"); - // We don't do any of this magic for type attributes yet. if (Attr.isSubClassOf("TypeAttr")) return; @@ -4367,7 +4343,11 @@ static void GenerateMutualExclusionsChecks(const Record &Attr, std::vector<std::string> DeclAttrs, StmtAttrs; - for (const Record *Exclusion : ExclusionsList) { + // Find all of the definitions that inherit from MutualExclusions and include + // the given attribute in the list of exclusions to generate the + // diagMutualExclusion() check. 
+ for (const Record *Exclusion : + Records.getAllDerivedDefinitions("MutualExclusions")) { std::vector<Record *> MutuallyExclusiveAttrs = Exclusion->getValueAsListOfDefs("Exclusions"); auto IsCurAttr = [Attr](const Record *R) { @@ -4670,7 +4650,8 @@ void GenerateIsParamExpr(const Record &Attr, raw_ostream &OS) { OS << "}\n\n"; } -void GenerateHandleAttrWithDelayedArgs(RecordKeeper &Records, raw_ostream &OS) { +void GenerateHandleAttrWithDelayedArgs(const RecordKeeper &Records, + raw_ostream &OS) { OS << "static void handleAttrWithDelayedArgs(Sema &S, Decl *D, "; OS << "const ParsedAttr &Attr) {\n"; OS << " SmallVector<Expr *, 4> ArgExprs;\n"; @@ -4708,7 +4689,7 @@ static bool IsKnownToGCC(const Record &Attr) { } /// Emits the parsed attribute helpers -void EmitClangAttrParsedAttrImpl(RecordKeeper &Records, raw_ostream &OS) { +void EmitClangAttrParsedAttrImpl(const RecordKeeper &Records, raw_ostream &OS) { emitSourceFileHeader("Parsed attribute helpers", OS, Records); OS << "#if !defined(WANT_DECL_MERGE_LOGIC) && " @@ -4872,14 +4853,14 @@ void EmitClangAttrParsedAttrImpl(RecordKeeper &Records, raw_ostream &OS) { } // Emits the kind list of parsed attributes -void EmitClangAttrParsedAttrKinds(RecordKeeper &Records, raw_ostream &OS) { +void EmitClangAttrParsedAttrKinds(const RecordKeeper &Records, + raw_ostream &OS) { emitSourceFileHeader("Attribute name matcher", OS, Records); - std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr"); std::vector<StringMatcher::StringPair> GNU, Declspec, Microsoft, CXX11, Keywords, Pragma, C23, HLSLAnnotation; std::set<std::string> Seen; - for (const auto *A : Attrs) { + for (const auto *A : Records.getAllDerivedDefinitions("Attr")) { const Record &Attr = *A; bool SemaHandler = Attr.getValueAsBit("SemaHandler"); @@ -4973,11 +4954,10 @@ void EmitClangAttrParsedAttrKinds(RecordKeeper &Records, raw_ostream &OS) { } // Emits the code to dump an attribute. 
-void EmitClangAttrTextNodeDump(RecordKeeper &Records, raw_ostream &OS) { +void EmitClangAttrTextNodeDump(const RecordKeeper &Records, raw_ostream &OS) { emitSourceFileHeader("Attribute text node dumper", OS, Records); - std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"), Args; - for (const auto *Attr : Attrs) { + for (const auto *Attr : Records.getAllDerivedDefinitions("Attr")) { const Record &R = *Attr; if (!R.getValueAsBit("ASTNode")) continue; @@ -4993,7 +4973,7 @@ void EmitClangAttrTextNodeDump(RecordKeeper &Records, raw_ostream &OS) { if (Spellings.size() > 1 && !SpellingNamesAreCommon(Spellings)) SS << " OS << \" \" << A->getSpelling();\n"; - Args = R.getValueAsListOfDefs("Args"); + std::vector<Record *> Args = R.getValueAsListOfDefs("Args"); for (const auto *Arg : Args) createArgument(*Arg, R.getName())->writeDump(SS); @@ -5012,11 +4992,10 @@ void EmitClangAttrTextNodeDump(RecordKeeper &Records, raw_ostream &OS) { } } -void EmitClangAttrNodeTraverse(RecordKeeper &Records, raw_ostream &OS) { +void EmitClangAttrNodeTraverse(const RecordKeeper &Records, raw_ostream &OS) { emitSourceFileHeader("Attribute text node traverser", OS, Records); - std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr"), Args; - for (const auto *Attr : Attrs) { + for (const auto *Attr : Records.getAllDerivedDefinitions("Attr")) { const Record &R = *Attr; if (!R.getValueAsBit("ASTNode")) continue; @@ -5024,7 +5003,7 @@ void EmitClangAttrNodeTraverse(RecordKeeper &Records, raw_ostream &OS) { std::string FunctionContent; llvm::raw_string_ostream SS(FunctionContent); - Args = R.getValueAsListOfDefs("Args"); + std::vector<Record *> Args = R.getValueAsListOfDefs("Args"); for (const auto *Arg : Args) createArgument(*Arg, R.getName())->writeDumpChildren(SS); if (Attr->getValueAsBit("AcceptsExprPack")) @@ -5041,7 +5020,8 @@ void EmitClangAttrNodeTraverse(RecordKeeper &Records, raw_ostream &OS) { } } -void EmitClangAttrParserStringSwitches(RecordKeeper &Records, raw_ostream &OS) { +void EmitClangAttrParserStringSwitches(const RecordKeeper &Records, + raw_ostream &OS) { generateNameToAttrsMap(Records); emitSourceFileHeader("Parser-related llvm::StringSwitch cases", OS, Records); emitClangAttrArgContextList(Records, OS); @@ -5056,16 +5036,15 @@ void EmitClangAttrParserStringSwitches(RecordKeeper &Records, raw_ostream &OS) { emitClangAttrStrictIdentifierArgList(Records, OS); } -void EmitClangAttrSubjectMatchRulesParserStringSwitches(RecordKeeper &Records, - raw_ostream &OS) { +void EmitClangAttrSubjectMatchRulesParserStringSwitches( + const RecordKeeper &Records, raw_ostream &OS) { getPragmaAttributeSupport(Records).generateParsingHelpers(OS); } -void EmitClangAttrDocTable(RecordKeeper &Records, raw_ostream &OS) { +void EmitClangAttrDocTable(const RecordKeeper &Records, raw_ostream &OS) { emitSourceFileHeader("Clang attribute documentation", OS, Records); - std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr"); - for (const auto *A : Attrs) { + for (const auto *A : Records.getAllDerivedDefinitions("Attr")) { if (!A->getValueAsBit("ASTNode")) continue; std::vector<Record *> Docs = A->getValueAsListOfDefs("Documentation"); @@ -5210,7 +5189,7 @@ GetAttributeHeadingAndSpellings(const Record &Documentation, return std::make_pair(std::move(Heading), std::move(SupportedSpellings)); } -static void WriteDocumentation(RecordKeeper &Records, +static void WriteDocumentation(const RecordKeeper &Records, const DocumentationData &Doc, raw_ostream &OS) { OS << Doc.Heading << "\n" << 
std::string(Doc.Heading.length(), '-') << "\n"; @@ -5265,7 +5244,7 @@ static void WriteDocumentation(RecordKeeper &Records, OS << "\n\n\n"; } -void EmitClangAttrDocs(RecordKeeper &Records, raw_ostream &OS) { +void EmitClangAttrDocs(const RecordKeeper &Records, raw_ostream &OS) { // Get the documentation introduction paragraph. const Record *Documentation = Records.getDef("GlobalDocumentation"); if (!Documentation) { @@ -5278,7 +5257,6 @@ void EmitClangAttrDocs(RecordKeeper &Records, raw_ostream &OS) { // Gather the Documentation lists from each of the attributes, based on the // category provided. - std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr"); struct CategoryLess { bool operator()(const Record *L, const Record *R) const { return L->getValueAsString("Name") < R->getValueAsString("Name"); @@ -5286,7 +5264,7 @@ void EmitClangAttrDocs(RecordKeeper &Records, raw_ostream &OS) { }; std::map<const Record *, std::vector<DocumentationData>, CategoryLess> SplitDocs; - for (const auto *A : Attrs) { + for (const auto *A : Records.getAllDerivedDefinitions("Attr")) { const Record &Attr = *A; std::vector<Record *> Docs = Attr.getValueAsListOfDefs("Documentation"); for (const auto *D : Docs) { @@ -5325,7 +5303,7 @@ void EmitClangAttrDocs(RecordKeeper &Records, raw_ostream &OS) { } } -void EmitTestPragmaAttributeSupportedAttributes(RecordKeeper &Records, +void EmitTestPragmaAttributeSupportedAttributes(const RecordKeeper &Records, raw_ostream &OS) { PragmaClangAttributeSupport Support = getPragmaAttributeSupport(Records); ParsedAttrMap Attrs = getParsedAttrList(Records); diff --git a/clang/utils/TableGen/TableGenBackends.h b/clang/utils/TableGen/TableGenBackends.h index 6b8d7f8..c0582e34 100644 --- a/clang/utils/TableGen/TableGenBackends.h +++ b/clang/utils/TableGen/TableGenBackends.h @@ -24,7 +24,7 @@ class RecordKeeper; namespace clang { -void EmitClangDeclContext(llvm::RecordKeeper &RK, llvm::raw_ostream &OS); +void EmitClangDeclContext(const llvm::RecordKeeper &RK, llvm::raw_ostream &OS); /** @param PriorizeIfSubclassOf These classes should be prioritized in the output. This is useful to force enum generation/jump tables/lookup tables to be more @@ -32,7 +32,7 @@ void EmitClangDeclContext(llvm::RecordKeeper &RK, llvm::raw_ostream &OS); in Decl for classes that inherit from DeclContext, for functions like castFromDeclContext. 
*/ -void EmitClangASTNodes(llvm::RecordKeeper &RK, llvm::raw_ostream &OS, +void EmitClangASTNodes(const llvm::RecordKeeper &RK, llvm::raw_ostream &OS, const std::string &N, const std::string &S, std::string_view PriorizeIfSubclassOf = ""); void EmitClangBasicReader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); @@ -40,38 +40,44 @@ void EmitClangBasicWriter(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); void EmitClangTypeNodes(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); void EmitClangTypeReader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); void EmitClangTypeWriter(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); -void EmitClangAttrParserStringSwitches(llvm::RecordKeeper &Records, +void EmitClangAttrParserStringSwitches(const llvm::RecordKeeper &Records, llvm::raw_ostream &OS); void EmitClangAttrSubjectMatchRulesParserStringSwitches( - llvm::RecordKeeper &Records, llvm::raw_ostream &OS); -void EmitClangAttrClass(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); -void EmitClangAttrImpl(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); -void EmitClangAttrList(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); -void EmitClangAttrSubjectMatchRuleList(llvm::RecordKeeper &Records, + const llvm::RecordKeeper &Records, llvm::raw_ostream &OS); +void EmitClangAttrClass(const llvm::RecordKeeper &Records, + llvm::raw_ostream &OS); +void EmitClangAttrImpl(const llvm::RecordKeeper &Records, + llvm::raw_ostream &OS); +void EmitClangAttrList(const llvm::RecordKeeper &Records, + llvm::raw_ostream &OS); +void EmitClangAttrSubjectMatchRuleList(const llvm::RecordKeeper &Records, llvm::raw_ostream &OS); -void EmitClangAttrPCHRead(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); -void EmitClangAttrPCHWrite(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); -void EmitClangRegularKeywordAttributeInfo(llvm::RecordKeeper &Records, +void EmitClangAttrPCHRead(const llvm::RecordKeeper &Records, + llvm::raw_ostream &OS); +void EmitClangAttrPCHWrite(const llvm::RecordKeeper &Records, + llvm::raw_ostream &OS); +void EmitClangRegularKeywordAttributeInfo(const llvm::RecordKeeper &Records, llvm::raw_ostream &OS); -void EmitClangAttrHasAttrImpl(llvm::RecordKeeper &Records, +void EmitClangAttrHasAttrImpl(const llvm::RecordKeeper &Records, llvm::raw_ostream &OS); -void EmitClangAttrSpellingListIndex(llvm::RecordKeeper &Records, +void EmitClangAttrSpellingListIndex(const llvm::RecordKeeper &Records, llvm::raw_ostream &OS); -void EmitClangAttrASTVisitor(llvm::RecordKeeper &Records, +void EmitClangAttrASTVisitor(const llvm::RecordKeeper &Records, llvm::raw_ostream &OS); -void EmitClangAttrTemplateInstantiate(llvm::RecordKeeper &Records, +void EmitClangAttrTemplateInstantiate(const llvm::RecordKeeper &Records, llvm::raw_ostream &OS); -void EmitClangAttrParsedAttrList(llvm::RecordKeeper &Records, +void EmitClangAttrParsedAttrList(const llvm::RecordKeeper &Records, llvm::raw_ostream &OS); -void EmitClangAttrParsedAttrImpl(llvm::RecordKeeper &Records, +void EmitClangAttrParsedAttrImpl(const llvm::RecordKeeper &Records, llvm::raw_ostream &OS); -void EmitClangAttrParsedAttrKinds(llvm::RecordKeeper &Records, +void EmitClangAttrParsedAttrKinds(const llvm::RecordKeeper &Records, llvm::raw_ostream &OS); -void EmitClangAttrTextNodeDump(llvm::RecordKeeper &Records, +void EmitClangAttrTextNodeDump(const llvm::RecordKeeper &Records, llvm::raw_ostream &OS); -void EmitClangAttrNodeTraverse(llvm::RecordKeeper &Records, +void EmitClangAttrNodeTraverse(const llvm::RecordKeeper &Records, llvm::raw_ostream 
&OS); -void EmitClangAttrDocTable(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); +void EmitClangAttrDocTable(const llvm::RecordKeeper &Records, + llvm::raw_ostream &OS); void EmitClangBuiltins(const llvm::RecordKeeper &Records, llvm::raw_ostream &OS); @@ -142,7 +148,8 @@ void EmitCdeBuiltinSema(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); void EmitCdeBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); void EmitCdeBuiltinAliases(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); -void EmitClangAttrDocs(llvm::RecordKeeper &Records, llvm::raw_ostream &OS); +void EmitClangAttrDocs(const llvm::RecordKeeper &Records, + llvm::raw_ostream &OS); void EmitClangDiagDocs(const llvm::RecordKeeper &Records, llvm::raw_ostream &OS); void EmitClangOptDocs(const llvm::RecordKeeper &Records, llvm::raw_ostream &OS); @@ -157,8 +164,8 @@ void EmitClangOpenCLBuiltinTests(const llvm::RecordKeeper &Records, void EmitClangDataCollectors(const llvm::RecordKeeper &Records, llvm::raw_ostream &OS); -void EmitTestPragmaAttributeSupportedAttributes(llvm::RecordKeeper &Records, - llvm::raw_ostream &OS); +void EmitTestPragmaAttributeSupportedAttributes( + const llvm::RecordKeeper &Records, llvm::raw_ostream &OS); } // end namespace clang diff --git a/compiler-rt/cmake/base-config-ix.cmake b/compiler-rt/cmake/base-config-ix.cmake index 5a97992..286a622 100644 --- a/compiler-rt/cmake/base-config-ix.cmake +++ b/compiler-rt/cmake/base-config-ix.cmake @@ -81,6 +81,8 @@ if("${COMPILER_RT_TEST_COMPILER}" MATCHES "clang[+]*$") set(COMPILER_RT_TEST_COMPILER_ID Clang) elseif("${COMPILER_RT_TEST_COMPILER}" MATCHES "clang.*.exe$") set(COMPILER_RT_TEST_COMPILER_ID Clang) +elseif("${COMPILER_RT_TEST_COMPILER}" MATCHES "cl.exe$") + set(COMPILER_RT_TEST_COMPILER_ID MSVC) else() set(COMPILER_RT_TEST_COMPILER_ID GNU) endif() diff --git a/compiler-rt/lib/builtins/cpu_model/aarch64.h b/compiler-rt/lib/builtins/cpu_model/aarch64.h index f6cbf75..2a734b0 100644 --- a/compiler-rt/lib/builtins/cpu_model/aarch64.h +++ b/compiler-rt/lib/builtins/cpu_model/aarch64.h @@ -8,7 +8,7 @@ #include "cpu_model.h" -#if !defined(__aarch64__) +#if !defined(__aarch64__) && !defined(__arm64__) && !defined(_M_ARM64) #error This file is intended only for aarch64-based targets #endif diff --git a/compiler-rt/test/lit.common.cfg.py b/compiler-rt/test/lit.common.cfg.py index 1c6fbc8..c533c7e 100644 --- a/compiler-rt/test/lit.common.cfg.py +++ b/compiler-rt/test/lit.common.cfg.py @@ -148,6 +148,9 @@ if compiler_id == "Clang": # requested it because it makes ASan reports more precise. config.debug_info_flags.append("-gcodeview") config.debug_info_flags.append("-gcolumn-info") +elif compiler_id == "MSVC": + config.debug_info_flags = ["/Z7"] + config.cxx_mode_flags = [] elif compiler_id == "GNU": config.cxx_mode_flags = ["-x c++"] config.debug_info_flags = ["-g"] diff --git a/flang/include/flang/Semantics/expression.h b/flang/include/flang/Semantics/expression.h index b1304d7..c90c8c4 100644 --- a/flang/include/flang/Semantics/expression.h +++ b/flang/include/flang/Semantics/expression.h @@ -331,7 +331,7 @@ private: const semantics::Scope &, bool C919bAlreadyEnforced = false); MaybeExpr CompleteSubscripts(ArrayRef &&); MaybeExpr ApplySubscripts(DataRef &&, std::vector<Subscript> &&); - void CheckConstantSubscripts(ArrayRef &); + void CheckSubscripts(ArrayRef &); bool CheckRanks(const DataRef &); // Return false if error exists. 
bool CheckPolymorphic(const DataRef &); // ditto bool CheckDataRef(const DataRef &); // ditto diff --git a/flang/lib/Evaluate/intrinsics.cpp b/flang/lib/Evaluate/intrinsics.cpp index 876c2ae..166dae9 100644 --- a/flang/lib/Evaluate/intrinsics.cpp +++ b/flang/lib/Evaluate/intrinsics.cpp @@ -2264,7 +2264,7 @@ std::optional<SpecificCall> IntrinsicInterface::Match( messages.Say("'kind=' argument must be a constant scalar integer " "whose value is a supported kind for the " "intrinsic result type"_err_en_US); - return std::nullopt; + // use default kind below for error recovery } else if (kindDummyArg->flags.test(ArgFlag::defaultsToSameKind)) { CHECK(sameArg); resultType = *sameArg->GetType(); @@ -2274,6 +2274,8 @@ std::optional<SpecificCall> IntrinsicInterface::Match( DynamicType{TypeCategory::Integer, defaults.sizeIntegerKind()}; } else { CHECK(kindDummyArg->flags.test(ArgFlag::defaultsToDefaultForResult)); + } + if (!resultType) { int kind{defaults.GetDefaultKind(*category)}; if (*category == TypeCategory::Character) { // ACHAR & CHAR resultType = DynamicType{kind, 1}; diff --git a/flang/lib/Parser/preprocessor.cpp b/flang/lib/Parser/preprocessor.cpp index 7d3130c..cb3725b 100644 --- a/flang/lib/Parser/preprocessor.cpp +++ b/flang/lib/Parser/preprocessor.cpp @@ -769,7 +769,7 @@ void Preprocessor::Directive(const TokenSequence &dir, Prescanner &prescanner) { if (included->bytes() > 0) { ProvenanceRange fileRange{ allSources_.AddIncludedFile(*included, dir.GetProvenanceRange())}; - Prescanner{prescanner, /*isNestedInIncludeDirective=*/true} + Prescanner{prescanner, *this, /*isNestedInIncludeDirective=*/true} .set_encoding(included->encoding()) .Prescan(fileRange); } diff --git a/flang/lib/Parser/prescan.cpp b/flang/lib/Parser/prescan.cpp index 7dcb61a..b594df8 100644 --- a/flang/lib/Parser/prescan.cpp +++ b/flang/lib/Parser/prescan.cpp @@ -32,10 +32,10 @@ Prescanner::Prescanner(Messages &messages, CookedSource &cooked, backslashFreeFormContinuation_{preprocessor.AnyDefinitions()}, encoding_{allSources_.encoding()} {} -Prescanner::Prescanner(const Prescanner &that, bool isNestedInIncludeDirective) - : messages_{that.messages_}, cooked_{that.cooked_}, - preprocessor_{that.preprocessor_}, allSources_{that.allSources_}, - features_{that.features_}, +Prescanner::Prescanner(const Prescanner &that, Preprocessor &prepro, + bool isNestedInIncludeDirective) + : messages_{that.messages_}, cooked_{that.cooked_}, preprocessor_{prepro}, + allSources_{that.allSources_}, features_{that.features_}, isNestedInIncludeDirective_{isNestedInIncludeDirective}, backslashFreeFormContinuation_{that.backslashFreeFormContinuation_}, inFixedForm_{that.inFixedForm_}, @@ -1104,7 +1104,14 @@ void Prescanner::FortranInclude(const char *firstQuote) { provenance, static_cast<std::size_t>(p - nextLine_)}; ProvenanceRange fileRange{ allSources_.AddIncludedFile(*included, includeLineRange)}; - Prescanner{*this, /*isNestedInIncludeDirective=*/false} + Preprocessor cleanPrepro{allSources_}; + if (preprocessor_.IsNameDefined("__FILE__"s)) { + cleanPrepro.DefineStandardMacros(); // __FILE__, __LINE__, &c. 
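+      // The fresh Preprocessor presumably keeps Fortran INCLUDE lines
+      // insulated from user #define macros (unlike #include); only
+      // predefined macros (__FILE__, __LINE__, and _CUDA just below) are
+      // carried over, and only when the parent prescanner defined them
+      // (see flang/test/Preprocessing/include-line.F90 later in this patch).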
+ } + if (preprocessor_.IsNameDefined("_CUDA"s)) { + cleanPrepro.Define("_CUDA"s, "1"); + } + Prescanner{*this, cleanPrepro, /*isNestedInIncludeDirective=*/false} .set_encoding(included->encoding()) .Prescan(fileRange); } diff --git a/flang/lib/Parser/prescan.h b/flang/lib/Parser/prescan.h index a64df53..9d4f7c0 100644 --- a/flang/lib/Parser/prescan.h +++ b/flang/lib/Parser/prescan.h @@ -35,7 +35,8 @@ class Prescanner { public: Prescanner(Messages &, CookedSource &, Preprocessor &, common::LanguageFeatureControl); - Prescanner(const Prescanner &, bool isNestedInIncludeDirective); + Prescanner( + const Prescanner &, Preprocessor &, bool isNestedInIncludeDirective); Prescanner(const Prescanner &) = delete; Prescanner(Prescanner &&) = delete; diff --git a/flang/lib/Semantics/check-call.cpp b/flang/lib/Semantics/check-call.cpp index c7ec873..71d1c08 100644 --- a/flang/lib/Semantics/check-call.cpp +++ b/flang/lib/Semantics/check-call.cpp @@ -1363,6 +1363,14 @@ static bool CheckElementalConformance(parser::ContextualMessages &messages, const auto &dummy{proc.dummyArguments.at(index++)}; if (arg) { if (const auto *expr{arg->UnwrapExpr()}) { + if (const auto *wholeSymbol{evaluate::UnwrapWholeSymbolDataRef(arg)}) { + wholeSymbol = &ResolveAssociations(*wholeSymbol); + if (IsAssumedSizeArray(*wholeSymbol)) { + evaluate::SayWithDeclaration(messages, *wholeSymbol, + "Whole assumed-size array '%s' may not be used as an argument to an elemental procedure"_err_en_US, + wholeSymbol->name()); + } + } if (auto argShape{evaluate::GetShape(context, *expr)}) { if (GetRank(*argShape) > 0) { std::string argName{"actual argument ("s + expr->AsFortran() + diff --git a/flang/lib/Semantics/check-declarations.cpp b/flang/lib/Semantics/check-declarations.cpp index c896ee7..b852fbf 100644 --- a/flang/lib/Semantics/check-declarations.cpp +++ b/flang/lib/Semantics/check-declarations.cpp @@ -1587,8 +1587,11 @@ void CheckHelper::CheckExternal(const Symbol &symbol) { } else if (!globalChars->CanBeCalledViaImplicitInterface() && context_.ShouldWarn( common::UsageWarning::ExternalInterfaceMismatch)) { - msg = messages_.Say( - "The global subprogram '%s' may not be referenced via the implicit interface '%s'"_err_en_US, + // TODO: This should be a hard error if the procedure has + // actually been called (as opposed to just being used as a + // procedure pointer target or passed as an actual argument). + msg = WarnIfNotInModuleFile( + "The global subprogram '%s' should not be referenced via the implicit interface '%s'"_warn_en_US, global->name(), symbol.name()); } } diff --git a/flang/lib/Semantics/expression.cpp b/flang/lib/Semantics/expression.cpp index e94a49f..072ebe1 100644 --- a/flang/lib/Semantics/expression.cpp +++ b/flang/lib/Semantics/expression.cpp @@ -298,7 +298,7 @@ MaybeExpr ExpressionAnalyzer::CompleteSubscripts(ArrayRef &&ref) { // Subscripts of named constants are checked in folding. // Subscripts of DATA statement objects are checked in data statement // conversion to initializers. - CheckConstantSubscripts(ref); + CheckSubscripts(ref); } return Designate(DataRef{std::move(ref)}); } @@ -326,7 +326,7 @@ MaybeExpr ExpressionAnalyzer::ApplySubscripts( std::move(dataRef.u)); } -void ExpressionAnalyzer::CheckConstantSubscripts(ArrayRef &ref) { +void ExpressionAnalyzer::CheckSubscripts(ArrayRef &ref) { // Fold subscript expressions and check for an empty triplet. 
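  // Note: this function (formerly CheckConstantSubscripts) now also flags
  // element or non-empty section references into zero-extent dimensions,
  // even when the subscripts themselves are not constant; see the
  // empty-dimension error below and flang/test/Semantics/expr-errors06.f90.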
const Symbol &arraySymbol{ref.base().GetLastSymbol()}; Shape lb{GetLBOUNDs(foldingContext_, NamedEntity{arraySymbol})}; @@ -390,6 +390,13 @@ void ExpressionAnalyzer::CheckConstantSubscripts(ArrayRef &ref) { for (Subscript &ss : ref.subscript()) { auto dimLB{ToInt64(lb[dim])}; auto dimUB{ToInt64(ub[dim])}; + if (dimUB && dimLB && *dimUB < *dimLB) { + AttachDeclaration( + Say("Empty array dimension %d cannot be subscripted as an element or non-empty array section"_err_en_US, + dim + 1), + arraySymbol); + break; + } std::optional<ConstantSubscript> val[2]; int vals{0}; if (auto *triplet{std::get_if<Triplet>(&ss.u)}) { diff --git a/flang/module/__fortran_builtins.f90 b/flang/module/__fortran_builtins.f90 index a9d3ac8..d1d4a63 100644 --- a/flang/module/__fortran_builtins.f90 +++ b/flang/module/__fortran_builtins.f90 @@ -6,7 +6,7 @@ ! !===------------------------------------------------------------------------===! -include '../include/flang/Runtime/magic-numbers.h' +#include '../include/flang/Runtime/magic-numbers.h' ! These naming shenanigans prevent names from Fortran intrinsic modules ! from being usable on INTRINSIC statements, and force the program diff --git a/flang/module/__fortran_ieee_exceptions.f90 b/flang/module/__fortran_ieee_exceptions.f90 index cebd604..6691012 100644 --- a/flang/module/__fortran_ieee_exceptions.f90 +++ b/flang/module/__fortran_ieee_exceptions.f90 @@ -11,7 +11,7 @@ ! here under another name so that IEEE_ARITHMETIC can USE it and export its ! declarations without clashing with a non-intrinsic module in a program. -include '../include/flang/Runtime/magic-numbers.h' +#include '../include/flang/Runtime/magic-numbers.h' module __fortran_ieee_exceptions use __fortran_builtins, only: & diff --git a/flang/module/ieee_arithmetic.f90 b/flang/module/ieee_arithmetic.f90 index 32e640b..7eaa7db 100644 --- a/flang/module/ieee_arithmetic.f90 +++ b/flang/module/ieee_arithmetic.f90 @@ -8,7 +8,7 @@ ! Fortran 2018 Clause 17 -include '../include/flang/Runtime/magic-numbers.h' +#include '../include/flang/Runtime/magic-numbers.h' module ieee_arithmetic ! F18 Clause 17.1p1: diff --git a/flang/module/iso_fortran_env.f90 b/flang/module/iso_fortran_env.f90 index cc1f58e..4e575b4 100644 --- a/flang/module/iso_fortran_env.f90 +++ b/flang/module/iso_fortran_env.f90 @@ -8,7 +8,7 @@ ! See Fortran 2023, subclause 16.10.2 -include '../include/flang/Runtime/magic-numbers.h' +#include '../include/flang/Runtime/magic-numbers.h' module iso_fortran_env diff --git a/flang/runtime/edit-input.cpp b/flang/runtime/edit-input.cpp index 61b070b..2cee35e 100644 --- a/flang/runtime/edit-input.cpp +++ b/flang/runtime/edit-input.cpp @@ -54,6 +54,10 @@ static RT_API_ATTRS bool CheckCompleteListDirectedField( } } +static inline RT_API_ATTRS char32_t GetSeparatorChar(const DataEdit &edit) { + return edit.modes.editingFlags & decimalComma ? 
char32_t{';'} : char32_t{','}; +} + template <int LOG2_BASE> static RT_API_ATTRS bool EditBOZInput( IoStatementState &io, const DataEdit &edit, void *n, std::size_t bytes) { @@ -70,6 +74,7 @@ static RT_API_ATTRS bool EditBOZInput( // Count significant digits after any leading white space & zeroes int digits{0}; int significantBits{0}; + const char32_t comma{GetSeparatorChar(edit)}; for (; next; next = io.NextInField(remaining, edit)) { char32_t ch{*next}; if (ch == ' ' || ch == '\t') { @@ -84,7 +89,7 @@ static RT_API_ATTRS bool EditBOZInput( } else if (LOG2_BASE >= 4 && ch >= '8' && ch <= '9') { } else if (LOG2_BASE >= 4 && ch >= 'A' && ch <= 'F') { } else if (LOG2_BASE >= 4 && ch >= 'a' && ch <= 'f') { - } else if (ch == ',') { + } else if (ch == comma) { break; // end non-list-directed field early } else { io.GetIoErrorHandler().SignalError( @@ -209,6 +214,7 @@ RT_API_ATTRS bool EditIntegerInput( common::UnsignedInt128 value{0}; bool any{!!sign}; bool overflow{false}; + const char32_t comma{GetSeparatorChar(edit)}; for (; next; next = io.NextInField(remaining, edit)) { char32_t ch{*next}; if (ch == ' ' || ch == '\t') { @@ -221,9 +227,23 @@ RT_API_ATTRS bool EditIntegerInput( int digit{0}; if (ch >= '0' && ch <= '9') { digit = ch - '0'; - } else if (ch == ',') { + } else if (ch == comma) { break; // end non-list-directed field early } else { + if (edit.modes.inNamelist && ch == GetRadixPointChar(edit)) { + // Ignore any fractional part that might appear in NAMELIST integer + // input, like a few other Fortran compilers do. + // TODO: also process exponents? Some compilers do, but they obviously + // can't just be ignored. + while ((next = io.NextInField(remaining, edit))) { + if (*next < '0' || *next > '9') { + break; + } + } + if (!next || *next == comma) { + break; + } + } io.GetIoErrorHandler().SignalError( "Bad character '%lc' in INTEGER input field", ch); return false; diff --git a/flang/test/Driver/include-header.f90 b/flang/test/Driver/include-header.f90 index 789d9952..28b75e0 100644 --- a/flang/test/Driver/include-header.f90 +++ b/flang/test/Driver/include-header.f90 @@ -51,7 +51,7 @@ program B end ! include-test-two.f90 -INCLUDE "basic-header-two.h" +#include "basic-header-two.h" #ifdef Y program Y #else diff --git a/flang/test/Preprocessing/include-file.h b/flang/test/Preprocessing/include-file.h new file mode 100644 index 0000000..4d4404e --- /dev/null +++ b/flang/test/Preprocessing/include-file.h @@ -0,0 +1 @@ +print *, sin(0.), j diff --git a/flang/test/Preprocessing/include-line.F90 b/flang/test/Preprocessing/include-line.F90 new file mode 100644 index 0000000..63ff9d3 --- /dev/null +++ b/flang/test/Preprocessing/include-line.F90 @@ -0,0 +1,6 @@ +! RUN: %flang_fc1 -fdebug-unparse %s -Dj=1 2>&1 | FileCheck %s +! Ensure that macro definitions don't affect INCLUDE lines (unlike #include) +#define sin cos +!CHECK: PRINT *, 0._4, j +include "include-file.h" +end diff --git a/flang/test/Semantics/elemental02.f90 b/flang/test/Semantics/elemental02.f90 new file mode 100644 index 0000000..7f8fb4a --- /dev/null +++ b/flang/test/Semantics/elemental02.f90 @@ -0,0 +1,13 @@ +! 
RUN: %python %S/test_errors.py %s %flang_fc1 +subroutine s(a) + real a(*) + interface + elemental function ef(efarg) + real, intent(in) :: efarg + end + end interface +!ERROR: Whole assumed-size array 'a' may not be used as an argument to an elemental procedure + print *, sqrt(a) +!ERROR: Whole assumed-size array 'a' may not be used as an argument to an elemental procedure + print *, ef(a) +end diff --git a/flang/test/Semantics/expr-errors06.f90 b/flang/test/Semantics/expr-errors06.f90 index 84872c7..bdcb92c 100644 --- a/flang/test/Semantics/expr-errors06.f90 +++ b/flang/test/Semantics/expr-errors06.f90 @@ -1,7 +1,7 @@ ! RUN: %python %S/test_errors.py %s %flang_fc1 -Werror ! Check out-of-range subscripts subroutine subr(da) - real a(10), da(2,1) + real a(10), da(2,1), empty(1:0,1) integer, parameter :: n(2) = [1, 2] integer unknown !ERROR: DATA statement designator 'a(0_8)' is out of range @@ -39,4 +39,10 @@ subroutine subr(da) print *, da(1,0) !WARNING: Subscript 2 is greater than upper bound 1 for dimension 2 of array print *, da(1,2) + print *, empty([(j,j=1,0)],1) ! ok + print *, empty(1:0,1) ! ok + print *, empty(:,1) ! ok + print *, empty(i:j,k) ! ok + !ERROR: Empty array dimension 1 cannot be subscripted as an element or non-empty array section + print *, empty(i,1) end diff --git a/flang/test/Semantics/kinds06.f90 b/flang/test/Semantics/kinds06.f90 new file mode 100644 index 0000000..f5b488e --- /dev/null +++ b/flang/test/Semantics/kinds06.f90 @@ -0,0 +1,4 @@ +!RUN: %python %S/test_errors.py %s %flang_fc1 +!ERROR: 'kind=' argument must be a constant scalar integer whose value is a supported kind for the intrinsic result type +print *, real(1.,666) +end diff --git a/flang/test/Semantics/local-vs-global.f90 b/flang/test/Semantics/local-vs-global.f90 index 6e2b3c4..3f7e933 100644 --- a/flang/test/Semantics/local-vs-global.f90 +++ b/flang/test/Semantics/local-vs-global.f90 @@ -50,20 +50,20 @@ program test external module_before_1 !WARNING: The global entity 'block_data_before_1' corresponding to the local procedure 'block_data_before_1' is not a callable subprogram external block_data_before_1 - !ERROR: The global subprogram 'explicit_before_1' may not be referenced via the implicit interface 'explicit_before_1' + !WARNING: The global subprogram 'explicit_before_1' should not be referenced via the implicit interface 'explicit_before_1' external explicit_before_1 external implicit_before_1 - !ERROR: The global subprogram 'explicit_func_before_1' may not be referenced via the implicit interface 'explicit_func_before_1' + !WARNING: The global subprogram 'explicit_func_before_1' should not be referenced via the implicit interface 'explicit_func_before_1' external explicit_func_before_1 external implicit_func_before_1 !WARNING: The global entity 'module_after_1' corresponding to the local procedure 'module_after_1' is not a callable subprogram external module_after_1 !WARNING: The global entity 'block_data_after_1' corresponding to the local procedure 'block_data_after_1' is not a callable subprogram external block_data_after_1 - !ERROR: The global subprogram 'explicit_after_1' may not be referenced via the implicit interface 'explicit_after_1' + !WARNING: The global subprogram 'explicit_after_1' should not be referenced via the implicit interface 'explicit_after_1' external explicit_after_1 external implicit_after_1 - !ERROR: The global subprogram 'explicit_func_after_1' may not be referenced via the implicit interface 'explicit_func_after_1' + !WARNING: The global subprogram 
'explicit_func_after_1' should not be referenced via the implicit interface 'explicit_func_after_1' external explicit_func_after_1 external implicit_func_after_1 call module_before_1 diff --git a/flang/unittests/Runtime/Namelist.cpp b/flang/unittests/Runtime/Namelist.cpp index f95c5d2..9037fa1 100644 --- a/flang/unittests/Runtime/Namelist.cpp +++ b/flang/unittests/Runtime/Namelist.cpp @@ -305,4 +305,33 @@ TEST(NamelistTests, Comma) { EXPECT_EQ(got, expect); } +// Tests REAL-looking input to integers +TEST(NamelistTests, RealValueForInt) { + OwningPtr<Descriptor> scDesc{ + MakeArray<TypeCategory::Integer, static_cast<int>(sizeof(int))>( + std::vector<int>{}, std::vector<int>{{}})}; + const NamelistGroup::Item items[]{{"j", *scDesc}}; + const NamelistGroup group{"nml", 1, items}; + static char t1[]{"&nml j=123.456/"}; + StaticDescriptor<1, true> statDesc; + Descriptor &internalDesc{statDesc.descriptor()}; + internalDesc.Establish(TypeCode{CFI_type_char}, + /*elementBytes=*/std::strlen(t1), t1, 0, nullptr, CFI_attribute_pointer); + auto inCookie{IONAME(BeginInternalArrayListInput)( + internalDesc, nullptr, 0, __FILE__, __LINE__)}; + ASSERT_TRUE(IONAME(InputNamelist)(inCookie, group)); + ASSERT_EQ(IONAME(EndIoStatement)(inCookie), IostatOk) + << "namelist real input for integer"; + char out[16]; + internalDesc.Establish(TypeCode{CFI_type_char}, /*elementBytes=*/sizeof out, + out, 0, nullptr, CFI_attribute_pointer); + auto outCookie{IONAME(BeginInternalArrayListOutput)( + internalDesc, nullptr, 0, __FILE__, __LINE__)}; + ASSERT_TRUE(IONAME(OutputNamelist)(outCookie, group)); + ASSERT_EQ(IONAME(EndIoStatement)(outCookie), IostatOk) << "namelist output"; + std::string got{out, sizeof out}; + static const std::string expect{" &NML J= 123/ "}; + EXPECT_EQ(got, expect); +} + // TODO: Internal NAMELIST error tests diff --git a/libcxx/docs/ReleaseNotes/20.rst b/libcxx/docs/ReleaseNotes/20.rst index 93d6027..a52b074 100644 --- a/libcxx/docs/ReleaseNotes/20.rst +++ b/libcxx/docs/ReleaseNotes/20.rst @@ -41,6 +41,7 @@ Implemented Papers - P2747R2: ``constexpr`` placement new (`Github <https://github.com/llvm/llvm-project/issues/105427>`__) - P2609R3: Relaxing Ranges Just A Smidge (`Github <https://github.com/llvm/llvm-project/issues/105253>`__) - P2985R0: A type trait for detecting virtual base classes (`Github <https://github.com/llvm/llvm-project/issues/105432>`__) +- ``std::jthread`` and ``<stop_token>`` are no longer guarded behind ``-fexperimental-library`` Improvements and New Features diff --git a/libcxx/docs/Status/Cxx20Papers.csv b/libcxx/docs/Status/Cxx20Papers.csv index ad788d7..b3c2693 100644 --- a/libcxx/docs/Status/Cxx20Papers.csv +++ b/libcxx/docs/Status/Cxx20Papers.csv @@ -105,7 +105,7 @@ "`P0553R4 <https://wg21.link/P0553R4>`__","Bit operations","2019-07 (Cologne)","|Complete|","9.0","" "`P0631R8 <https://wg21.link/P0631R8>`__","Math Constants","2019-07 (Cologne)","|Complete|","11.0","" "`P0645R10 <https://wg21.link/P0645R10>`__","Text Formatting","2019-07 (Cologne)","|Complete|","14.0","The implementation has been complete since LLVM 14, but the feature-test macro was not set until LLVM 19" -"`P0660R10 <https://wg21.link/P0660R10>`__","Stop Token and Joining Thread, Rev 10.","2019-07 (Cologne)","|Complete|","18.0","The paper is implemented but the features are experimental and can be enabled via ``-fexperimental-library``."
+"`P0660R10 <https://wg21.link/P0660R10>`__","Stop Token and Joining Thread, Rev 10.","2019-07 (Cologne)","|Complete|","20.0","The feature was implemented since LLVM 18 but was guarded behind ``-fexperimental-library``." "`P0784R7 <https://wg21.link/P0784R7>`__","More constexpr containers","2019-07 (Cologne)","|Complete|","12.0","" "`P0980R1 <https://wg21.link/P0980R1>`__","Making std::string constexpr","2019-07 (Cologne)","|Complete|","15.0","" "`P1004R2 <https://wg21.link/P1004R2>`__","Making std::vector constexpr","2019-07 (Cologne)","|Complete|","15.0","" diff --git a/libcxx/docs/UserDocumentation.rst b/libcxx/docs/UserDocumentation.rst index 273ca8c..3651e52 100644 --- a/libcxx/docs/UserDocumentation.rst +++ b/libcxx/docs/UserDocumentation.rst @@ -69,9 +69,8 @@ The following features are currently considered experimental and are only provid when ``-fexperimental-library`` is passed: * The parallel algorithms library (``<execution>`` and the associated algorithms) -* ``std::stop_token``, ``std::stop_source`` and ``std::stop_callback`` -* ``std::jthread`` * ``std::chrono::tzdb`` and related time zone functionality +* ``<syncstream>`` .. note:: Experimental libraries are experimental. diff --git a/libcxx/include/__config b/libcxx/include/__config index f0a9243..1c0b7c0 100644 --- a/libcxx/include/__config +++ b/libcxx/include/__config @@ -224,7 +224,6 @@ _LIBCPP_HARDENING_MODE_DEBUG // easier to grep for target specific flags once the feature is complete. # if !defined(_LIBCPP_ENABLE_EXPERIMENTAL) && !defined(_LIBCPP_BUILDING_LIBRARY) # define _LIBCPP_HAS_NO_INCOMPLETE_PSTL -# define _LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN # define _LIBCPP_HAS_NO_EXPERIMENTAL_TZDB # define _LIBCPP_HAS_NO_EXPERIMENTAL_SYNCSTREAM # endif diff --git a/libcxx/include/__stop_token/atomic_unique_lock.h b/libcxx/include/__stop_token/atomic_unique_lock.h index 13e59f9..8fb70a4 100644 --- a/libcxx/include/__stop_token/atomic_unique_lock.h +++ b/libcxx/include/__stop_token/atomic_unique_lock.h @@ -7,8 +7,8 @@ // //===----------------------------------------------------------------------===// -#ifndef _LIBCPP___STOP_TOKEN_ATOMIC_UNIQUE_GUARD_H -#define _LIBCPP___STOP_TOKEN_ATOMIC_UNIQUE_GUARD_H +#ifndef _LIBCPP___STOP_TOKEN_ATOMIC_UNIQUE_LOCK_H +#define _LIBCPP___STOP_TOKEN_ATOMIC_UNIQUE_LOCK_H #include <__bit/popcount.h> #include <__config> @@ -133,8 +133,8 @@ private: _LIBCPP_HIDE_FROM_ABI static constexpr auto __set_locked_bit = [](_State __state) { return __state | _LockedBit; }; }; -#endif // _LIBCPP_STD_VER >= 20 +#endif // _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_THREADS) _LIBCPP_END_NAMESPACE_STD -#endif // _LIBCPP___STOP_TOKEN_ATOMIC_UNIQUE_GUARD_H +#endif // _LIBCPP___STOP_TOKEN_ATOMIC_UNIQUE_LOCK_H diff --git a/libcxx/include/__stop_token/stop_callback.h b/libcxx/include/__stop_token/stop_callback.h index 760cf2b..8d7167a 100644 --- a/libcxx/include/__stop_token/stop_callback.h +++ b/libcxx/include/__stop_token/stop_callback.h @@ -31,7 +31,7 @@ _LIBCPP_PUSH_MACROS _LIBCPP_BEGIN_NAMESPACE_STD -#if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && !defined(_LIBCPP_HAS_NO_THREADS) +#if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_THREADS) template <class _Callback> class _LIBCPP_AVAILABILITY_SYNC stop_callback : private __stop_callback_base { @@ -93,10 +93,10 @@ private: template <class _Callback> _LIBCPP_AVAILABILITY_SYNC stop_callback(stop_token, _Callback) -> stop_callback<_Callback>; -#endif // _LIBCPP_STD_VER >= 20 +#endif // _LIBCPP_STD_VER >= 20 && 
!defined(_LIBCPP_HAS_NO_THREADS) _LIBCPP_END_NAMESPACE_STD _LIBCPP_POP_MACROS -#endif // _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && !defined(_LIBCPP_HAS_NO_THREADS) +#endif // _LIBCPP___STOP_TOKEN_STOP_CALLBACK_H diff --git a/libcxx/include/__stop_token/stop_source.h b/libcxx/include/__stop_token/stop_source.h index 7069746..7243856 100644 --- a/libcxx/include/__stop_token/stop_source.h +++ b/libcxx/include/__stop_token/stop_source.h @@ -22,7 +22,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD -#if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && !defined(_LIBCPP_HAS_NO_THREADS) +#if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_THREADS) struct nostopstate_t { explicit nostopstate_t() = default; @@ -84,8 +84,8 @@ private: __intrusive_shared_ptr<__stop_state> __state_; }; -#endif // _LIBCPP_STD_VER >= 20 +#endif // _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_THREADS) _LIBCPP_END_NAMESPACE_STD -#endif // _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && !defined(_LIBCPP_HAS_NO_THREADS) +#endif // _LIBCPP___STOP_TOKEN_STOP_SOURCE_H diff --git a/libcxx/include/__stop_token/stop_token.h b/libcxx/include/__stop_token/stop_token.h index 1bd75cb..b256973 100644 --- a/libcxx/include/__stop_token/stop_token.h +++ b/libcxx/include/__stop_token/stop_token.h @@ -20,7 +20,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD -#if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && !defined(_LIBCPP_HAS_NO_THREADS) +#if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_THREADS) class _LIBCPP_AVAILABILITY_SYNC stop_token { public: @@ -56,7 +56,7 @@ private: _LIBCPP_HIDE_FROM_ABI explicit stop_token(const __intrusive_shared_ptr<__stop_state>& __state) : __state_(__state) {} }; -#endif // _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && !defined(_LIBCPP_HAS_NO_THREADS) +#endif // _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_THREADS) _LIBCPP_END_NAMESPACE_STD diff --git a/libcxx/include/__thread/jthread.h b/libcxx/include/__thread/jthread.h index b3d5c25..d85ad3b 100644 --- a/libcxx/include/__thread/jthread.h +++ b/libcxx/include/__thread/jthread.h @@ -30,7 +30,7 @@ _LIBCPP_PUSH_MACROS #include <__undef_macros> -#if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) +#if _LIBCPP_STD_VER >= 20 _LIBCPP_BEGIN_NAMESPACE_STD @@ -127,7 +127,7 @@ private: _LIBCPP_END_NAMESPACE_STD -#endif // _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) +#endif // _LIBCPP_STD_VER >= 20 _LIBCPP_POP_MACROS diff --git a/libcxx/include/condition_variable b/libcxx/include/condition_variable index 5195cd6..229a2ce 100644 --- a/libcxx/include/condition_variable +++ b/libcxx/include/condition_variable @@ -173,7 +173,7 @@ public: template <class _Lock, class _Rep, class _Period, class _Predicate> bool _LIBCPP_HIDE_FROM_ABI wait_for(_Lock& __lock, const chrono::duration<_Rep, _Period>& __d, _Predicate __pred); -# if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) +# if _LIBCPP_STD_VER >= 20 template <class _Lock, class _Predicate> _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI bool wait(_Lock& __lock, stop_token __stoken, _Predicate __pred); @@ -186,7 +186,7 @@ public: _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI bool wait_for(_Lock& __lock, stop_token __stoken, const chrono::duration<_Rep, _Period>& __rel_time, _Predicate __pred); -# endif // _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) +# endif // _LIBCPP_STD_VER >= 
20 }; inline condition_variable_any::condition_variable_any() : __mut_(make_shared<mutex>()) {} @@ -260,7 +260,7 @@ condition_variable_any::wait_for(_Lock& __lock, const chrono::duration<_Rep, _Pe return wait_until(__lock, chrono::steady_clock::now() + __d, std::move(__pred)); } -# if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) +# if _LIBCPP_STD_VER >= 20 template <class _Lock, class _Predicate> bool condition_variable_any::wait(_Lock& __user_lock, stop_token __stoken, _Predicate __pred) { @@ -341,7 +341,7 @@ bool condition_variable_any::wait_for( return wait_until(__lock, std::move(__stoken), chrono::steady_clock::now() + __rel_time, std::move(__pred)); } -# endif // _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) +# endif // _LIBCPP_STD_VER >= 20 _LIBCPP_EXPORTED_FROM_ABI void notify_all_at_thread_exit(condition_variable&, unique_lock<mutex>); diff --git a/libcxx/include/version b/libcxx/include/version index dc1d3fd..5d679ca 100644 --- a/libcxx/include/version +++ b/libcxx/include/version @@ -417,7 +417,7 @@ __cpp_lib_void_t 201411L <type_traits> // # define __cpp_lib_is_layout_compatible 201907L # define __cpp_lib_is_nothrow_convertible 201806L // # define __cpp_lib_is_pointer_interconvertible 201907L -# if !defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && _LIBCPP_AVAILABILITY_HAS_SYNC +# if !defined(_LIBCPP_HAS_NO_THREADS) && _LIBCPP_AVAILABILITY_HAS_SYNC # define __cpp_lib_jthread 201911L # endif # if !defined(_LIBCPP_HAS_NO_THREADS) && _LIBCPP_AVAILABILITY_HAS_SYNC diff --git a/libcxx/modules/std/stop_token.inc b/libcxx/modules/std/stop_token.inc index ad24017..5daf460 100644 --- a/libcxx/modules/std/stop_token.inc +++ b/libcxx/modules/std/stop_token.inc @@ -9,7 +9,6 @@ export namespace std { #ifndef _LIBCPP_HAS_NO_THREADS -# ifdef _LIBCPP_ENABLE_EXPERIMENTAL // [stoptoken], class stop_token using std::stop_token; @@ -22,6 +21,5 @@ export namespace std { // [stopcallback], class template stop_callback using std::stop_callback; -# endif // _LIBCPP_ENABLE_EXPERIMENTAL -#endif // _LIBCPP_HAS_NO_THREADS +#endif // _LIBCPP_HAS_NO_THREADS } // namespace std diff --git a/libcxx/modules/std/thread.inc b/libcxx/modules/std/thread.inc index 6504a39..61e3191 100644 --- a/libcxx/modules/std/thread.inc +++ b/libcxx/modules/std/thread.inc @@ -15,9 +15,7 @@ export namespace std { using std::swap; // [thread.jthread.class], class jthread -# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) using std::jthread; -# endif // [thread.thread.this], namespace this_thread namespace this_thread { diff --git a/libcxx/test/libcxx/experimental/fexperimental-library.compile.pass.cpp b/libcxx/test/libcxx/experimental/fexperimental-library.compile.pass.cpp index 3d50d23..fd06886 100644 --- a/libcxx/test/libcxx/experimental/fexperimental-library.compile.pass.cpp +++ b/libcxx/test/libcxx/experimental/fexperimental-library.compile.pass.cpp @@ -20,10 +20,6 @@ # error "-fexperimental-library should enable the PSTL" #endif -#ifdef _LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN -# error "-fexperimental-library should enable the stop_token" -#endif - #ifdef _LIBCPP_HAS_NO_EXPERIMENTAL_TZDB # error "-fexperimental-library should enable the chrono TZDB" #endif diff --git a/libcxx/test/std/language.support/support.limits/support.limits.general/stop_token.version.compile.pass.cpp b/libcxx/test/std/language.support/support.limits/support.limits.general/stop_token.version.compile.pass.cpp index 0d7811e..49233cd 100644 --- 
a/libcxx/test/std/language.support/support.limits/support.limits.general/stop_token.version.compile.pass.cpp +++ b/libcxx/test/std/language.support/support.limits/support.limits.general/stop_token.version.compile.pass.cpp @@ -44,7 +44,7 @@ #elif TEST_STD_VER == 20 -# if !defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC) +# if !defined(_LIBCPP_HAS_NO_THREADS) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC) # ifndef __cpp_lib_jthread # error "__cpp_lib_jthread should be defined in c++20" # endif @@ -53,13 +53,13 @@ # endif # else # ifdef __cpp_lib_jthread -# error "__cpp_lib_jthread should not be defined when the requirement '!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC)' is not met!" +# error "__cpp_lib_jthread should not be defined when the requirement '!defined(_LIBCPP_HAS_NO_THREADS) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC)' is not met!" # endif # endif #elif TEST_STD_VER == 23 -# if !defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC) +# if !defined(_LIBCPP_HAS_NO_THREADS) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC) # ifndef __cpp_lib_jthread # error "__cpp_lib_jthread should be defined in c++23" # endif @@ -68,13 +68,13 @@ # endif # else # ifdef __cpp_lib_jthread -# error "__cpp_lib_jthread should not be defined when the requirement '!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC)' is not met!" +# error "__cpp_lib_jthread should not be defined when the requirement '!defined(_LIBCPP_HAS_NO_THREADS) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC)' is not met!" # endif # endif #elif TEST_STD_VER > 23 -# if !defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC) +# if !defined(_LIBCPP_HAS_NO_THREADS) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC) # ifndef __cpp_lib_jthread # error "__cpp_lib_jthread should be defined in c++26" # endif @@ -83,7 +83,7 @@ # endif # else # ifdef __cpp_lib_jthread -# error "__cpp_lib_jthread should not be defined when the requirement '!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC)' is not met!" +# error "__cpp_lib_jthread should not be defined when the requirement '!defined(_LIBCPP_HAS_NO_THREADS) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC)' is not met!" 
# endif # endif diff --git a/libcxx/test/std/language.support/support.limits/support.limits.general/thread.version.compile.pass.cpp b/libcxx/test/std/language.support/support.limits/support.limits.general/thread.version.compile.pass.cpp index 1735abb..9770023 100644 --- a/libcxx/test/std/language.support/support.limits/support.limits.general/thread.version.compile.pass.cpp +++ b/libcxx/test/std/language.support/support.limits/support.limits.general/thread.version.compile.pass.cpp @@ -61,7 +61,7 @@ # error "__cpp_lib_formatters should not be defined before c++23" # endif -# if !defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC) +# if !defined(_LIBCPP_HAS_NO_THREADS) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC) # ifndef __cpp_lib_jthread # error "__cpp_lib_jthread should be defined in c++20" # endif @@ -70,7 +70,7 @@ # endif # else # ifdef __cpp_lib_jthread -# error "__cpp_lib_jthread should not be defined when the requirement '!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC)' is not met!" +# error "__cpp_lib_jthread should not be defined when the requirement '!defined(_LIBCPP_HAS_NO_THREADS) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC)' is not met!" # endif # endif @@ -89,7 +89,7 @@ # endif # endif -# if !defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC) +# if !defined(_LIBCPP_HAS_NO_THREADS) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC) # ifndef __cpp_lib_jthread # error "__cpp_lib_jthread should be defined in c++23" # endif @@ -98,7 +98,7 @@ # endif # else # ifdef __cpp_lib_jthread -# error "__cpp_lib_jthread should not be defined when the requirement '!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC)' is not met!" +# error "__cpp_lib_jthread should not be defined when the requirement '!defined(_LIBCPP_HAS_NO_THREADS) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC)' is not met!" # endif # endif @@ -117,7 +117,7 @@ # endif # endif -# if !defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC) +# if !defined(_LIBCPP_HAS_NO_THREADS) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC) # ifndef __cpp_lib_jthread # error "__cpp_lib_jthread should be defined in c++26" # endif @@ -126,7 +126,7 @@ # endif # else # ifdef __cpp_lib_jthread -# error "__cpp_lib_jthread should not be defined when the requirement '!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC)' is not met!" +# error "__cpp_lib_jthread should not be defined when the requirement '!defined(_LIBCPP_HAS_NO_THREADS) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC)' is not met!" 
# endif # endif diff --git a/libcxx/test/std/language.support/support.limits/support.limits.general/version.version.compile.pass.cpp b/libcxx/test/std/language.support/support.limits/support.limits.general/version.version.compile.pass.cpp index a022c90..985ffef 100644 --- a/libcxx/test/std/language.support/support.limits/support.limits.general/version.version.compile.pass.cpp +++ b/libcxx/test/std/language.support/support.limits/support.limits.general/version.version.compile.pass.cpp @@ -3908,7 +3908,7 @@ # error "__cpp_lib_is_within_lifetime should not be defined before c++26" # endif -# if !defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC) +# if !defined(_LIBCPP_HAS_NO_THREADS) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC) # ifndef __cpp_lib_jthread # error "__cpp_lib_jthread should be defined in c++20" # endif @@ -3917,7 +3917,7 @@ # endif # else # ifdef __cpp_lib_jthread -# error "__cpp_lib_jthread should not be defined when the requirement '!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC)' is not met!" +# error "__cpp_lib_jthread should not be defined when the requirement '!defined(_LIBCPP_HAS_NO_THREADS) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC)' is not met!" # endif # endif @@ -5375,7 +5375,7 @@ # error "__cpp_lib_is_within_lifetime should not be defined before c++26" # endif -# if !defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC) +# if !defined(_LIBCPP_HAS_NO_THREADS) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC) # ifndef __cpp_lib_jthread # error "__cpp_lib_jthread should be defined in c++23" # endif @@ -5384,7 +5384,7 @@ # endif # else # ifdef __cpp_lib_jthread -# error "__cpp_lib_jthread should not be defined when the requirement '!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC)' is not met!" +# error "__cpp_lib_jthread should not be defined when the requirement '!defined(_LIBCPP_HAS_NO_THREADS) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC)' is not met!" # endif # endif @@ -7199,7 +7199,7 @@ # endif # endif -# if !defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC) +# if !defined(_LIBCPP_HAS_NO_THREADS) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC) # ifndef __cpp_lib_jthread # error "__cpp_lib_jthread should be defined in c++26" # endif @@ -7208,7 +7208,7 @@ # endif # else # ifdef __cpp_lib_jthread -# error "__cpp_lib_jthread should not be defined when the requirement '!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC)' is not met!" +# error "__cpp_lib_jthread should not be defined when the requirement '!defined(_LIBCPP_HAS_NO_THREADS) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC)' is not met!" 
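// For illustration: with _LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN gone, user
// code can gate on the feature-test macro alone. A minimal sketch, assuming
// a C++20 toolchain with threads enabled (`run` is a hypothetical name):
#include <version>
#if defined(__cpp_lib_jthread)
#  include <stop_token>
#  include <thread>
void run() {
  std::jthread worker([](std::stop_token st) {
    while (!st.stop_requested()) {
      // do cooperative work until a stop is requested
    }
  });
} // ~jthread() requests stop and joins automatically
#endif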
# endif # endif diff --git a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_terminates.sh.cpp b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_terminates.sh.cpp index 2a73203..eab7a4f 100644 --- a/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_terminates.sh.cpp +++ b/libcxx/test/std/thread/thread.condition/thread.condition.condvarany/wait_terminates.sh.cpp @@ -137,7 +137,7 @@ int main(int argc, char **argv) { case 4: cv.wait_for(mut, wait, pred_function); break; case 5: cv.wait_until(mut, Clock::now() + wait); break; case 6: cv.wait_until(mut, Clock::now() + wait, pred_function); break; -#if TEST_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && _LIBCPP_AVAILABILITY_HAS_SYNC +#if TEST_STD_VER >= 20 && !(defined(_LIBCPP_VERSION) && !_LIBCPP_AVAILABILITY_HAS_SYNC) case 7: cv.wait(mut, std::stop_source{}.get_token(), pred_function); break; case 8: cv.wait_for(mut, std::stop_source{}.get_token(), wait, pred_function); break; case 9: cv.wait_until(mut, std::stop_source{}.get_token(), Clock::now() + wait, pred_function); break; @@ -146,7 +146,7 @@ int main(int argc, char **argv) { case 8: case 9: return 0; -#endif //TEST_STD_VER >=20 && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) +#endif default: assert(false); } } catch (...) {} diff --git a/libcxx/test/support/make_test_thread.h b/libcxx/test/support/make_test_thread.h index 00190a8..7b44b647 100644 --- a/libcxx/test/support/make_test_thread.h +++ b/libcxx/test/support/make_test_thread.h @@ -28,12 +28,12 @@ namespace support { // but any other test that only creates threads as a side effect of testing should // work if they use the utilities in this file. -template <class F, class ...Args> -std::thread make_test_thread(F&& f, Args&& ...args) { - return std::thread(std::forward<F>(f), std::forward<Args>(args)...); +template <class F, class... Args> +std::thread make_test_thread(F&& f, Args&&... 
args) { + return std::thread(std::forward<F>(f), std::forward<Args>(args)...); } -#if TEST_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) +#if TEST_STD_VER >= 20 # ifdef _LIBCPP_VERSION # define TEST_AVAILABILITY_SYNC _LIBCPP_AVAILABILITY_SYNC # else diff --git a/libcxx/utils/generate_feature_test_macro_components.py b/libcxx/utils/generate_feature_test_macro_components.py index 3bdd3ad..cb5ff77 100755 --- a/libcxx/utils/generate_feature_test_macro_components.py +++ b/libcxx/utils/generate_feature_test_macro_components.py @@ -802,8 +802,8 @@ feature_test_macros = [ "name": "__cpp_lib_jthread", "values": {"c++20": 201911}, "headers": ["stop_token", "thread"], - "test_suite_guard": "!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC)", - "libcxx_guard": "!defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN) && _LIBCPP_AVAILABILITY_HAS_SYNC", + "test_suite_guard": "!defined(_LIBCPP_HAS_NO_THREADS) && (!defined(_LIBCPP_VERSION) || _LIBCPP_AVAILABILITY_HAS_SYNC)", + "libcxx_guard": "!defined(_LIBCPP_HAS_NO_THREADS) && _LIBCPP_AVAILABILITY_HAS_SYNC", }, { "name": "__cpp_lib_latch", diff --git a/lld/COFF/Chunks.h b/lld/COFF/Chunks.h index 28e0fd6..040a249 100644 --- a/lld/COFF/Chunks.h +++ b/lld/COFF/Chunks.h @@ -544,12 +544,12 @@ static const uint8_t importThunkARM64[] = { 0x00, 0x02, 0x1f, 0xd6, // br x16 }; -static const uint32_t importThunkARM64EC[] = { - 0x9000000b, // adrp x11, 0x0 - 0xf940016b, // ldr x11, [x11] - 0x9000000a, // adrp x10, 0x0 - 0x9100014a, // add x10, x10, #0x0 - 0x14000000 // b 0x0 +static const uint8_t importThunkARM64EC[] = { + 0x0b, 0x00, 0x00, 0x90, // adrp x11, 0x0 + 0x6b, 0x01, 0x40, 0xf9, // ldr x11, [x11] + 0x0a, 0x00, 0x00, 0x90, // adrp x10, 0x0 + 0x4a, 0x01, 0x00, 0x91, // add x10, x10, #0x0 + 0x00, 0x00, 0x00, 0x14 // b 0x0 }; // Windows-specific. diff --git a/lldb/packages/Python/lldbsuite/test/lldbtest.py b/lldb/packages/Python/lldbsuite/test/lldbtest.py index e0da7cb..df5a110 100644 --- a/lldb/packages/Python/lldbsuite/test/lldbtest.py +++ b/lldb/packages/Python/lldbsuite/test/lldbtest.py @@ -1317,7 +1317,18 @@ class Base(unittest.TestCase): # Need to do something different for non-Linux/Android targets cpuinfo_path = self.getBuildArtifact("cpuinfo") if configuration.lldb_platform_name: - self.runCmd('platform get-file "/proc/cpuinfo" ' + cpuinfo_path) + self.runCmd( + 'platform get-file "/proc/cpuinfo" ' + cpuinfo_path, check=False + ) + if not self.res.Succeeded(): + if self.TraceOn(): + print( + 'Failed to get /proc/cpuinfo from remote: "{}"'.format( + self.res.GetOutput().strip() + ) + ) + print("All cpuinfo feature checks will fail.") + return "" else: cpuinfo_path = "/proc/cpuinfo" diff --git a/lldb/packages/Python/lldbsuite/test/lldbutil.py b/lldb/packages/Python/lldbsuite/test/lldbutil.py index 629565b..660a3c0 100644 --- a/lldb/packages/Python/lldbsuite/test/lldbutil.py +++ b/lldb/packages/Python/lldbsuite/test/lldbutil.py @@ -773,9 +773,16 @@ def get_threads_stopped_at_breakpoint_id(process, bpid): return threads for thread in stopped_threads: - # Make sure we've hit our breakpoint... - break_id = thread.GetStopReasonDataAtIndex(0) - if break_id == bpid: + # Make sure we've hit our breakpoint. + # From the docs of GetStopReasonDataAtIndex: "Breakpoint stop reasons + # will have data that consists of pairs of breakpoint IDs followed by + # the breakpoint location IDs". 
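+        # For example, data of [1, 1, 2, 3] means breakpoint 1 was hit at
+        # its location 1 and breakpoint 2 at its location 3, so the even
+        # indices (0, 2, ...) hold the breakpoint IDs and the range below
+        # strides by 2.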
+ # Iterate over all such pairs looking for `bpid`. + break_ids = [ + thread.GetStopReasonDataAtIndex(idx) + for idx in range(0, thread.GetStopReasonDataCount(), 2) + ] + if bpid in break_ids: threads.append(thread) return threads diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp index 5b9de6f..5b679bd 100644 --- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp +++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp @@ -2932,14 +2932,21 @@ void DWARFASTParserClang::ParseSingleMember( last_field_info = this_field_info; last_field_info.SetIsBitfield(true); } else { - last_field_info.bit_offset = field_bit_offset; + FieldInfo this_field_info{.is_bitfield = false}; + this_field_info.bit_offset = field_bit_offset; + // TODO: we shouldn't silently ignore the bit_size if we fail + // to GetByteSize. if (std::optional<uint64_t> clang_type_size = member_type->GetByteSize(nullptr)) { - last_field_info.bit_size = *clang_type_size * character_width; + this_field_info.bit_size = *clang_type_size * character_width; } - last_field_info.SetIsBitfield(false); + if (this_field_info.GetFieldEnd() <= last_field_info.GetEffectiveFieldEnd()) + this_field_info.SetEffectiveFieldEnd( + last_field_info.GetEffectiveFieldEnd()); + + last_field_info = this_field_info; } // Don't turn artificial members such as vtable pointers into real FieldDecls @@ -3738,7 +3745,7 @@ void DWARFASTParserClang::AddUnnamedBitfieldToRecordTypeIfNeeded( const FieldInfo ¤t_field) { // TODO: get this value from target const uint64_t word_width = 32; - uint64_t last_field_end = previous_field.bit_offset + previous_field.bit_size; + uint64_t last_field_end = previous_field.GetEffectiveFieldEnd(); if (!previous_field.IsBitfield()) { // The last field was not a bit-field... diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.h b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.h index 3809ee9..1ffb09b 100644 --- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.h +++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.h @@ -258,9 +258,27 @@ protected: private: struct FieldInfo { + /// Size in bits that this field occupies. Can but + /// need not be the DW_AT_bit_size of the field. uint64_t bit_size = 0; + + /// Offset of this field in bits from the beginning + /// of the containing struct. Can but need not + /// be the DW_AT_data_bit_offset of the field. uint64_t bit_offset = 0; + + /// In case this field is folded into the storage + /// of a previous member's storage (for example + /// with [[no_unique_address]]), the effective field + /// end is the offset in bits from the beginning of + /// the containing struct where the field we were + /// folded into ended. + std::optional<uint64_t> effective_field_end; + + /// Set to 'true' if this field is a bit-field. bool is_bitfield = false; + + /// Set to 'true' if this field is DW_AT_artificial. bool is_artificial = false; FieldInfo() = default; @@ -276,6 +294,19 @@ private: // bit offset than any previous bitfield + size. return (bit_size + bit_offset) <= next_bit_offset; } + + /// Returns the offset in bits of where the storage this field + /// occupies ends. + uint64_t GetFieldEnd() const { return bit_size + bit_offset; } + + void SetEffectiveFieldEnd(uint64_t val) { effective_field_end = val; } + + /// If this field was folded into storage of a previous field, + /// returns the offset in bits of where that storage ends. 
Otherwise, + /// returns the regular field end (see \ref GetFieldEnd). + uint64_t GetEffectiveFieldEnd() const { + return effective_field_end.value_or(GetFieldEnd()); + } }; /// Parsed form of all attributes that are relevant for parsing type members. diff --git a/lldb/test/API/functionalities/watchpoint/categories b/lldb/test/API/functionalities/watchpoint/categories new file mode 100644 index 0000000..97934fb --- /dev/null +++ b/lldb/test/API/functionalities/watchpoint/categories @@ -0,0 +1,2 @@ +watchpoint + diff --git a/lldb/test/Shell/SymbolFile/DWARF/no_unique_address-with-bitfields.cpp b/lldb/test/Shell/SymbolFile/DWARF/no_unique_address-with-bitfields.cpp index 1c9cc36..980180e 100644 --- a/lldb/test/Shell/SymbolFile/DWARF/no_unique_address-with-bitfields.cpp +++ b/lldb/test/Shell/SymbolFile/DWARF/no_unique_address-with-bitfields.cpp @@ -1,10 +1,10 @@ -// LLDB currently erroneously adds an unnamed bitfield -// into the AST when an overlapping no_unique_address -// field precedes a bitfield. - // RUN: %clang --target=x86_64-apple-macosx -c -gdwarf -o %t %s // RUN: %lldb %t \ // RUN: -o "target var global" \ +// RUN: -o "target var global2" \ +// RUN: -o "target var global3" \ +// RUN: -o "target var global4" \ +// RUN: -o "target var global5" \ // RUN: -o "image dump ast" \ // RUN: -o exit | FileCheck %s @@ -12,12 +12,12 @@ // CHECK: CXXRecordDecl {{.*}} struct Foo definition // CHECK: |-FieldDecl {{.*}} data 'char[5]' // CHECK-NEXT: |-FieldDecl {{.*}} padding 'Empty' -// CHECK-NEXT: |-FieldDecl {{.*}} 'int' -// CHECK-NEXT: | `-IntegerLiteral {{.*}} 'int' 8 -// CHECK-NEXT: `-FieldDecl {{.*}} sloc> flag 'unsigned long' +// CHECK-NEXT: `-FieldDecl {{.*}} flag 'unsigned long' // CHECK-NEXT: `-IntegerLiteral {{.*}} 'int' 1 struct Empty {}; +struct Empty2 {}; +struct Empty3 {}; struct Foo { char data[5]; @@ -26,3 +26,85 @@ struct Foo { }; Foo global; + +// CHECK: CXXRecordDecl {{.*}} struct ConsecutiveOverlap definition +// CHECK: |-FieldDecl {{.*}} data 'char[5]' +// CHECK-NEXT: |-FieldDecl {{.*}} p1 'Empty' +// CHECK-NEXT: |-FieldDecl {{.*}} p2 'Empty2' +// CHECK-NEXT: |-FieldDecl {{.*}} p3 'Empty3' +// CHECK-NEXT: `-FieldDecl {{.*}} flag 'unsigned long' +// CHECK-NEXT: `-IntegerLiteral {{.*}} 'int' 1 + +struct ConsecutiveOverlap { + char data[5]; + [[no_unique_address]] Empty p1; + [[no_unique_address]] Empty2 p2; + [[no_unique_address]] Empty3 p3; + unsigned long flag : 1; +}; + +ConsecutiveOverlap global2; + +// FIXME: we fail to deduce the unnamed bitfields here. +// +// CHECK: CXXRecordDecl {{.*}} struct MultipleAtOffsetZero definition +// CHECK: |-FieldDecl {{.*}} data 'char[5]' +// CHECK-NEXT: |-FieldDecl {{.*}} p1 'Empty' +// CHECK-NEXT: |-FieldDecl {{.*}} f1 'unsigned long' +// CHECK-NEXT: | `-IntegerLiteral {{.*}} 'int' 1 +// CHECK-NEXT: |-FieldDecl {{.*}} p2 'Empty2' +// CHECK-NEXT: `-FieldDecl {{.*}} f2 'unsigned long' +// CHECK-NEXT: `-IntegerLiteral {{.*}} 'int' 1 + +struct MultipleAtOffsetZero { + char data[5]; + [[no_unique_address]] Empty p1; + int : 4; + unsigned long f1 : 1; + [[no_unique_address]] Empty2 p2; + int : 4; + unsigned long f2 : 1; +}; + +MultipleAtOffsetZero global3; + +// FIXME: we fail to deduce the unnamed bitfields here. 
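+// (Presumably because the gap left by each `int : 4` falls inside storage
+// already covered by the preceding [[no_unique_address]] member, the
+// effective-field-end bookkeeping sees no hole and synthesizes nothing.)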
+// +// CHECK: CXXRecordDecl {{.*}} struct MultipleEmpty definition +// CHECK: |-FieldDecl {{.*}} data 'char[5]' +// CHECK-NEXT: |-FieldDecl {{.*}} p1 'Empty' +// CHECK-NEXT: |-FieldDecl {{.*}} f1 'unsigned long' +// CHECK-NEXT: | `-IntegerLiteral {{.*}} 'int' 1 +// CHECK-NEXT: |-FieldDecl {{.*}} p2 'Empty' +// CHECK-NEXT: `-FieldDecl {{.*}} f2 'unsigned long' +// CHECK-NEXT: `-IntegerLiteral {{.*}} 'int' 1 + +struct MultipleEmpty { + char data[5]; + [[no_unique_address]] Empty p1; + int : 4; + unsigned long f1 : 1; + [[no_unique_address]] Empty p2; + int : 4; + unsigned long f2 : 1; +}; + +MultipleEmpty global4; + +// CHECK: CXXRecordDecl {{.*}} struct FieldBitfieldOverlap definition +// CHECK: |-FieldDecl {{.*}} a 'int' +// CHECK-NEXT: | `-IntegerLiteral {{.*}} 'int' 3 +// CHECK-NEXT: |-FieldDecl {{.*}} p1 'Empty' +// CHECK-NEXT: |-FieldDecl {{.*}} b 'int' +// CHECK-NEXT: | `-IntegerLiteral {{.*}} 'int' 6 +// CHECK-NEXT: `-FieldDecl {{.*}} c 'int' +// CHECK-NEXT: `-IntegerLiteral {{.*}} 'int' 1 + +struct FieldBitfieldOverlap { + int a : 3; + [[no_unique_address]] Empty p1; + int b : 6; + int c : 1; +}; + +FieldBitfieldOverlap global5; diff --git a/llvm/include/llvm/ADT/DenseMap.h b/llvm/include/llvm/ADT/DenseMap.h index 083d5c9..7535eb2 100644 --- a/llvm/include/llvm/ADT/DenseMap.h +++ b/llvm/include/llvm/ADT/DenseMap.h @@ -570,7 +570,7 @@ private: template <typename KeyArg, typename... ValueArgs> BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key, ValueArgs &&...Values) { - TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket); + TheBucket = InsertIntoBucketImpl(Key, TheBucket); TheBucket->getFirst() = std::forward<KeyArg>(Key); ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...); @@ -580,7 +580,7 @@ private: template <typename LookupKeyT> BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key, ValueT &&Value, LookupKeyT &Lookup) { - TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket); + TheBucket = InsertIntoBucketImpl(Lookup, TheBucket); TheBucket->getFirst() = std::move(Key); ::new (&TheBucket->getSecond()) ValueT(std::move(Value)); @@ -588,8 +588,7 @@ private: } template <typename LookupKeyT> - BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup, - BucketT *TheBucket) { + BucketT *InsertIntoBucketImpl(const LookupKeyT &Lookup, BucketT *TheBucket) { incrementEpoch(); // If the load of the hash table is more than 3/4, or if fewer than 1/8 of diff --git a/llvm/include/llvm/Analysis/CtxProfAnalysis.h b/llvm/include/llvm/Analysis/CtxProfAnalysis.h index d3b7ba9..b3e64b2 100644 --- a/llvm/include/llvm/Analysis/CtxProfAnalysis.h +++ b/llvm/include/llvm/Analysis/CtxProfAnalysis.h @@ -9,7 +9,6 @@ #ifndef LLVM_ANALYSIS_CTXPROFANALYSIS_H #define LLVM_ANALYSIS_CTXPROFANALYSIS_H -#include "llvm/ADT/DenseMap.h" #include "llvm/IR/GlobalValue.h" #include "llvm/IR/InstrTypes.h" #include "llvm/IR/IntrinsicInst.h" diff --git a/llvm/include/llvm/IR/IntrinsicsDirectX.td b/llvm/include/llvm/IR/IntrinsicsDirectX.td index f1017bd..97c6963 100644 --- a/llvm/include/llvm/IR/IntrinsicsDirectX.td +++ b/llvm/include/llvm/IR/IntrinsicsDirectX.td @@ -87,7 +87,7 @@ def int_dx_umad : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, LLV def int_dx_normalize : DefaultAttrsIntrinsic<[LLVMMatchType<0>], [llvm_anyfloat_ty]>; def int_dx_rcp : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>; def int_dx_rsqrt : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>; - def int_dx_wave_is_first_lane : 
DefaultAttrsIntrinsic<[llvm_i1_ty], [], [IntrConvergent]>; def int_dx_sign : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>], [llvm_any_ty]>; +def int_dx_step : DefaultAttrsIntrinsic<[LLVMMatchType<0>], [llvm_anyfloat_ty, LLVMMatchType<0>]>; } diff --git a/llvm/include/llvm/IR/IntrinsicsSPIRV.td b/llvm/include/llvm/IR/IntrinsicsSPIRV.td index 766fc0d..a4c0195 100644 --- a/llvm/include/llvm/IR/IntrinsicsSPIRV.td +++ b/llvm/include/llvm/IR/IntrinsicsSPIRV.td @@ -67,6 +67,7 @@ let TargetPrefix = "spv" in { def int_spv_normalize : DefaultAttrsIntrinsic<[LLVMMatchType<0>], [llvm_anyfloat_ty]>; def int_spv_rsqrt : DefaultAttrsIntrinsic<[LLVMMatchType<0>], [llvm_anyfloat_ty]>; def int_spv_saturate : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>; + def int_spv_step : DefaultAttrsIntrinsic<[LLVMMatchType<0>], [LLVMMatchType<0>, llvm_anyfloat_ty]>; def int_spv_fdot : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>], [llvm_anyfloat_ty, LLVMScalarOrSameVectorWidth<0, LLVMVectorElementType<0>>], diff --git a/llvm/include/llvm/IR/IntrinsicsX86.td b/llvm/include/llvm/IR/IntrinsicsX86.td index fafa505..5262e31 100644 --- a/llvm/include/llvm/IR/IntrinsicsX86.td +++ b/llvm/include/llvm/IR/IntrinsicsX86.td @@ -5520,6 +5520,106 @@ let TargetPrefix = "x86" in { [IntrNoMem, ImmArg<ArgIndex<2>>]>; } +// conversion with saturation +let TargetPrefix = "x86" in { + def int_x86_avx10_vcvttss2sis : ClangBuiltin<"__builtin_ia32_vcvttss2sis32">, + DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<1>>]>; + def int_x86_avx10_vcvttss2sis64 : ClangBuiltin<"__builtin_ia32_vcvttss2sis64">, + DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_v4f32_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<1>>]>; + def int_x86_avx10_vcvttss2usis : ClangBuiltin<"__builtin_ia32_vcvttss2usis32">, + DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<1>>]>; + def int_x86_avx10_vcvttss2usis64 : ClangBuiltin<"__builtin_ia32_vcvttss2usis64">, + DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_v4f32_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<1>>]>; + def int_x86_avx10_vcvttsd2sis : ClangBuiltin<"__builtin_ia32_vcvttsd2sis32">, + DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<1>>]>; + def int_x86_avx10_vcvttsd2sis64 : ClangBuiltin<"__builtin_ia32_vcvttsd2sis64">, + DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_v2f64_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<1>>]>; + def int_x86_avx10_vcvttsd2usis : ClangBuiltin<"__builtin_ia32_vcvttsd2usis32">, + DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<1>>]>; + def int_x86_avx10_vcvttsd2usis64 : ClangBuiltin<"__builtin_ia32_vcvttsd2usis64">, + DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_v2f64_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<1>>]>; + def int_x86_avx10_mask_vcvttpd2dqs_128 : ClangBuiltin<"__builtin_ia32_vcvttpd2dqs128_mask">, + DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v2f64_ty, llvm_v4i32_ty, llvm_i8_ty], + [IntrNoMem]>; + def int_x86_avx10_mask_vcvttpd2dqs_round_256: ClangBuiltin<"__builtin_ia32_vcvttpd2dqs256_round_mask">, + DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4f64_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<3>>]>; + def int_x86_avx10_mask_vcvttpd2dqs_round_512 : ClangBuiltin<"__builtin_ia32_vcvttpd2dqs512_round_mask">, + DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8f64_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty], + 
[IntrNoMem, ImmArg<ArgIndex<3>>]>; + def int_x86_avx10_mask_vcvttpd2udqs_128 : ClangBuiltin<"__builtin_ia32_vcvttpd2udqs128_mask">, + DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v2f64_ty,llvm_v4i32_ty, llvm_i8_ty], + [IntrNoMem]>; + def int_x86_avx10_mask_vcvttpd2udqs_round_256: ClangBuiltin<"__builtin_ia32_vcvttpd2udqs256_round_mask">, + DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4f64_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<3>>]>; + def int_x86_avx10_mask_vcvttpd2udqs_round_512 : ClangBuiltin<"__builtin_ia32_vcvttpd2udqs512_round_mask">, + DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8f64_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<3>>]>; + def int_x86_avx10_mask_vcvttpd2qqs_128 : ClangBuiltin<"__builtin_ia32_vcvttpd2qqs128_mask">, + DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty,llvm_v2i64_ty, llvm_i8_ty], + [IntrNoMem]>; + def int_x86_avx10_mask_vcvttpd2qqs_round_256: ClangBuiltin<"__builtin_ia32_vcvttpd2qqs256_round_mask">, + DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4f64_ty, llvm_v4i64_ty, llvm_i8_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<3>>]>; + def int_x86_avx10_mask_vcvttpd2qqs_round_512 : ClangBuiltin<"__builtin_ia32_vcvttpd2qqs512_round_mask">, + DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8f64_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<3>>]>; + def int_x86_avx10_mask_vcvttpd2uqqs_128 : ClangBuiltin<"__builtin_ia32_vcvttpd2uqqs128_mask">, + DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty,llvm_v2i64_ty, llvm_i8_ty], + [IntrNoMem]>; + def int_x86_avx10_mask_vcvttpd2uqqs_round_256: ClangBuiltin<"__builtin_ia32_vcvttpd2uqqs256_round_mask">, + DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4f64_ty, llvm_v4i64_ty, llvm_i8_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<3>>]>; + def int_x86_avx10_mask_vcvttpd2uqqs_round_512 : ClangBuiltin<"__builtin_ia32_vcvttpd2uqqs512_round_mask">, + DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8f64_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<3>>]>; + def int_x86_avx10_mask_vcvttps2dqs_128 : ClangBuiltin<"__builtin_ia32_vcvttps2dqs128_mask">, + DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4i32_ty, llvm_i8_ty], + [IntrNoMem]>; + def int_x86_avx10_mask_vcvttps2dqs_round_256: ClangBuiltin<"__builtin_ia32_vcvttps2dqs256_round_mask">, + DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8f32_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<3>>]>; + def int_x86_avx10_mask_vcvttps2dqs_round_512 : ClangBuiltin<"__builtin_ia32_vcvttps2dqs512_round_mask">, + DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16f32_ty, llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<3>>]>; + def int_x86_avx10_mask_vcvttps2udqs_128 : ClangBuiltin<"__builtin_ia32_vcvttps2udqs128_mask">, + DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4i32_ty, llvm_i8_ty], + [IntrNoMem]>; + def int_x86_avx10_mask_vcvttps2udqs_round_256: ClangBuiltin<"__builtin_ia32_vcvttps2udqs256_round_mask">, + DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8f32_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<3>>]>; + def int_x86_avx10_mask_vcvttps2udqs_round_512 : ClangBuiltin<"__builtin_ia32_vcvttps2udqs512_round_mask">, + DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16f32_ty, llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<3>>]>; + def int_x86_avx10_mask_vcvttps2qqs_128 : ClangBuiltin<"__builtin_ia32_vcvttps2qqs128_mask">, + 
DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v4f32_ty, llvm_v2i64_ty, llvm_i8_ty], + [IntrNoMem]>; + def int_x86_avx10_mask_vcvttps2qqs_round_256: ClangBuiltin<"__builtin_ia32_vcvttps2qqs256_round_mask">, + DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4f32_ty, llvm_v4i64_ty, llvm_i8_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<3>>]>; + def int_x86_avx10_mask_vcvttps2qqs_round_512 : ClangBuiltin<"__builtin_ia32_vcvttps2qqs512_round_mask">, + DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8f32_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<3>>]>; + def int_x86_avx10_mask_vcvttps2uqqs_128 : ClangBuiltin<"__builtin_ia32_vcvttps2uqqs128_mask">, + DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v4f32_ty,llvm_v2i64_ty, llvm_i8_ty], + [IntrNoMem]>; + def int_x86_avx10_mask_vcvttps2uqqs_round_256: ClangBuiltin<"__builtin_ia32_vcvttps2uqqs256_round_mask">, + DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4f32_ty, llvm_v4i64_ty, llvm_i8_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<3>>]>; + def int_x86_avx10_mask_vcvttps2uqqs_round_512 : ClangBuiltin<"__builtin_ia32_vcvttps2uqqs512_round_mask">, + DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8f32_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<ArgIndex<3>>]>; +} + //===----------------------------------------------------------------------===// // SHA intrinsics let TargetPrefix = "x86" in { diff --git a/llvm/include/llvm/ProfileData/PGOCtxProfReader.h b/llvm/include/llvm/ProfileData/PGOCtxProfReader.h index e034819..beda07d 100644 --- a/llvm/include/llvm/ProfileData/PGOCtxProfReader.h +++ b/llvm/include/llvm/ProfileData/PGOCtxProfReader.h @@ -15,13 +15,11 @@ #ifndef LLVM_PROFILEDATA_CTXINSTRPROFILEREADER_H #define LLVM_PROFILEDATA_CTXINSTRPROFILEREADER_H -#include "llvm/ADT/DenseSet.h" #include "llvm/Bitstream/BitstreamReader.h" #include "llvm/IR/GlobalValue.h" #include "llvm/ProfileData/PGOCtxProfWriter.h" #include "llvm/Support/Error.h" #include <map> -#include <vector> namespace llvm { /// A node (context) in the loaded contextual profile, suitable for mutation @@ -34,7 +32,7 @@ namespace llvm { class PGOCtxProfContext final { public: using CallTargetMapTy = std::map<GlobalValue::GUID, PGOCtxProfContext>; - using CallsiteMapTy = DenseMap<uint32_t, CallTargetMapTy>; + using CallsiteMapTy = std::map<uint32_t, CallTargetMapTy>; private: friend class PGOCtxProfileReader; @@ -97,7 +95,16 @@ public: return Callsites.find(I)->second; } - void getContainedGuids(DenseSet<GlobalValue::GUID> &Guids) const; + /// Insert this node's GUID as well as the GUIDs of the transitive closure of + /// child nodes, into the provided set (technically, all that is required of + /// `TSetOfGUIDs` is to have an `insert(GUID)` member) + template <class TSetOfGUIDs> + void getContainedGuids(TSetOfGUIDs &Guids) const { + Guids.insert(GUID); + for (const auto &[_, Callsite] : Callsites) + for (const auto &[_, Callee] : Callsite) + Callee.getContainedGuids(Guids); + } }; class PGOCtxProfileReader final { diff --git a/llvm/include/llvm/Support/FormatVariadic.h b/llvm/include/llvm/Support/FormatVariadic.h index 005d26f..d0e647e 100644 --- a/llvm/include/llvm/Support/FormatVariadic.h +++ b/llvm/include/llvm/Support/FormatVariadic.h @@ -167,7 +167,7 @@ public: // Formats textual output. 
`Fmt` is a string consisting of one or more // replacement sequences with the following grammar: // -// rep_field ::= "{" index ["," layout] [":" format] "}" +// rep_field ::= "{" [index] ["," layout] [":" format] "}" // index ::= <non-negative integer> // layout ::= [[[char]loc]width] // format ::= <any string not containing "{" or "}"> @@ -175,8 +175,12 @@ public: // loc ::= "-" | "=" | "+" // width ::= <positive integer> // -// index - A non-negative integer specifying the index of the item in the -// parameter pack to print. Any other value is invalid. +// index - An optional non-negative integer specifying the index of the item +// in the parameter pack to print. Any other value is invalid. If it is +// not specified, it will be automatically assigned a value based on +// the order of rep_field seen in the format string. Note that mixing +// automatic and explicit indices in the same call is an error and will +// fail validation in assert-enabled builds. // layout - A string controlling how the field is laid out within the available // space. // format - A type-dependent string used to provide additional options to diff --git a/llvm/lib/CodeGen/InitUndef.cpp b/llvm/lib/CodeGen/InitUndef.cpp index d6f7c0d..a89c823 100644 --- a/llvm/lib/CodeGen/InitUndef.cpp +++ b/llvm/lib/CodeGen/InitUndef.cpp @@ -272,6 +272,7 @@ bool InitUndef::runOnMachineFunction(MachineFunction &MF) { for (auto *DeadMI : DeadInsts) DeadMI->eraseFromParent(); DeadInsts.clear(); + NewRegs.clear(); return Changed; } diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp index db33d52..53ce219 100644 --- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp @@ -635,7 +635,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node, void InstrEmitter::EmitCopyToRegClassNode(SDNode *Node, DenseMap<SDValue, Register> &VRBaseMap) { - unsigned VReg = getVR(Node->getOperand(0), VRBaseMap); + Register VReg = getVR(Node->getOperand(0), VRBaseMap); // Create the new VReg in the destination class and emit a copy. unsigned DstRCIdx = Node->getConstantOperandVal(1); @@ -678,7 +678,7 @@ void InstrEmitter::EmitRegSequence(SDNode *Node, // insert copies for them in TwoAddressInstructionPass anyway. if (!R || !R->getReg().isPhysical()) { unsigned SubIdx = Op->getAsZExtVal(); - unsigned SubReg = getVR(Node->getOperand(i-1), VRBaseMap); + Register SubReg = getVR(Node->getOperand(i - 1), VRBaseMap); const TargetRegisterClass *TRC = MRI->getRegClass(SubReg); const TargetRegisterClass *SRC = TRI->getMatchingSuperRegClass(RC, TRC, SubIdx); @@ -1274,7 +1274,7 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned, break; } case ISD::CopyFromReg: { - unsigned SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg(); + Register SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg(); EmitCopyFromReg(Node, 0, IsClone, SrcReg, VRBaseMap); break; } @@ -1343,7 +1343,7 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned, SmallVector<unsigned, 8> GroupIdx; // Remember registers that are part of early-clobber defs. - SmallVector<unsigned, 8> ECRegs; + SmallVector<Register, 8> ECRegs; // Add all of the operand registers to the instruction. for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) { @@ -1424,7 +1424,7 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned, // used), but this does not match the semantics of our early-clobber flag.
// If an early-clobber operand register is also an input operand register, // then remove the early-clobber flag. - for (unsigned Reg : ECRegs) { + for (Register Reg : ECRegs) { if (MIB->readsRegister(Reg, TRI)) { MachineOperand *MO = MIB->findRegisterDefOperand(Reg, TRI, false, false); diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp index 2fa9e46..c622b2a 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp @@ -3952,19 +3952,9 @@ void DAGTypeLegalizer::ExpandIntRes_FP_TO_XINT(SDNode *N, SDValue &Lo, if (getTypeAction(Op.getValueType()) == TargetLowering::TypePromoteFloat) Op = GetPromotedFloat(Op); - if (getTypeAction(Op.getValueType()) == TargetLowering::TypeSoftPromoteHalf) { - EVT OFPVT = Op.getValueType(); - EVT NFPVT = TLI.getTypeToTransformTo(*DAG.getContext(), OFPVT); - Op = GetSoftPromotedHalf(Op); - Op = DAG.getNode(OFPVT == MVT::f16 ? ISD::FP16_TO_FP : ISD::BF16_TO_FP, dl, - NFPVT, Op); - Op = DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, dl, VT, Op); - SplitInteger(Op, Lo, Hi); - return; - } - - if (Op.getValueType() == MVT::bf16) { - // Extend to f32 as there is no bf16 libcall. + // If the input is bf16 or needs to be soft promoted, extend to f32. + if (getTypeAction(Op.getValueType()) == TargetLowering::TypeSoftPromoteHalf || + Op.getValueType() == MVT::bf16) { Op = fpExtendHelper(Op, Chain, IsStrict, MVT::f32, dl, DAG); } diff --git a/llvm/lib/ProfileData/PGOCtxProfReader.cpp b/llvm/lib/ProfileData/PGOCtxProfReader.cpp index 8354e30..496854e 100644 --- a/llvm/lib/ProfileData/PGOCtxProfReader.cpp +++ b/llvm/lib/ProfileData/PGOCtxProfReader.cpp @@ -44,14 +44,6 @@ PGOCtxProfContext::getOrEmplace(uint32_t Index, GlobalValue::GUID G, return Iter->second; } -void PGOCtxProfContext::getContainedGuids( - DenseSet<GlobalValue::GUID> &Guids) const { - Guids.insert(GUID); - for (const auto &[_, Callsite] : Callsites) - for (const auto &[_, Callee] : Callsite) - Callee.getContainedGuids(Guids); -} - Expected<BitstreamEntry> PGOCtxProfileReader::advance() { return Cursor.advance(BitstreamCursor::AF_DontAutoprocessAbbrevs); } diff --git a/llvm/lib/Support/FormatVariadic.cpp b/llvm/lib/Support/FormatVariadic.cpp index 9056466..3240b1c 100644 --- a/llvm/lib/Support/FormatVariadic.cpp +++ b/llvm/lib/Support/FormatVariadic.cpp @@ -63,16 +63,18 @@ static std::optional<ReplacementItem> parseReplacementItem(StringRef Spec) { unsigned Align = 0; AlignStyle Where = AlignStyle::Right; StringRef Options; - unsigned Index = 0; + unsigned Index = ~0U; RepString = RepString.trim(); - if (RepString.consumeInteger(0, Index)) { - assert(false && "Invalid replacement sequence index!"); - return std::nullopt; - } + + // If index is not specified, keep it ~0U to indicate unresolved index. 
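+  // Illustrative uses (not part of this patch):
+  //   formatv("{} vs {}", A, B)   // both fields parsed with Index == ~0U
+  //   formatv("{1} vs {0}", A, B) // both indices given explicitly
+  // parseFormatString() later resolves any ~0U index to the next automatic
+  // value, in order of appearance.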
+ RepString.consumeInteger(0, Index); RepString = RepString.trim(); + if (RepString.consume_front(",")) { - if (!consumeFieldLayout(RepString, Where, Align, Pad)) + if (!consumeFieldLayout(RepString, Where, Align, Pad)) { assert(false && "Invalid replacement field layout specification!"); + return std::nullopt; + } } RepString = RepString.trim(); if (RepString.consume_front(":")) { @@ -80,8 +82,10 @@ static std::optional<ReplacementItem> parseReplacementItem(StringRef Spec) { RepString = StringRef(); } RepString = RepString.trim(); - assert(RepString.empty() && - "Unexpected characters found in replacement string!"); + if (!RepString.empty()) { + assert(0 && "Unexpected characters found in replacement string!"); + return std::nullopt; + } return ReplacementItem(Spec, Index, Align, Where, Pad, Options); } @@ -139,6 +143,7 @@ SmallVector<ReplacementItem, 2> formatv_object_base::parseFormatString(StringRef Fmt, size_t NumArgs, bool Validate) { SmallVector<ReplacementItem, 2> Replacements; + unsigned NextAutomaticIndex = 0; #if ENABLE_VALIDATION const StringRef SavedFmtStr = Fmt; @@ -150,6 +155,9 @@ formatv_object_base::parseFormatString(StringRef Fmt, size_t NumArgs, std::tie(I, Fmt) = splitLiteralAndReplacement(Fmt); if (!I) continue; + if (I->Index == ~0U) + I->Index = NextAutomaticIndex++; + Replacements.emplace_back(*I); #if ENABLE_VALIDATION if (I->Type == ReplacementType::Format) @@ -175,9 +183,8 @@ formatv_object_base::parseFormatString(StringRef Fmt, size_t NumArgs, }; if (NumExpectedArgs != NumArgs) { - errs() << formatv( - "Expected {0} Args, but got {1} for format string '{2}'\n", - NumExpectedArgs, NumArgs, SavedFmtStr); + errs() << formatv("Expected {} Args, but got {} for format string '{}'\n", + NumExpectedArgs, NumArgs, SavedFmtStr); assert(0 && "Invalid formatv() call"); return getErrorReplacements("Unexpected number of arguments"); } @@ -195,11 +202,21 @@ formatv_object_base::parseFormatString(StringRef Fmt, size_t NumArgs, if (Count != NumExpectedArgs) { errs() << formatv( - "Replacement field indices cannot have holes for format string '{0}'\n", + "Replacement field indices cannot have holes for format string '{}'\n", SavedFmtStr); assert(0 && "Invalid format string"); return getErrorReplacements("Replacement indices have holes"); } + + // If we had automatic numbering of replacement indices, verify that all + // indices used automatic numbering. 
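+  // Illustrative: formatv("{} {1} {2}", A, B, C) trips this check, since only
+  // the first field was numbered automatically (NextAutomaticIndex == 1 while
+  // Count == 3).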
+ if (NextAutomaticIndex != 0 && NextAutomaticIndex != Count) { + errs() << formatv( + "Cannot mix automatic and explicit indices for format string '{}'\n", + SavedFmtStr); + assert(0 && "Invalid format string"); + return getErrorReplacements("Cannot mix automatic and explicit indices"); + } #endif // ENABLE_VALIDATION return Replacements; } diff --git a/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td b/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td index 4a009f8..8a7d2af 100644 --- a/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td +++ b/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td @@ -74,227 +74,227 @@ def : WriteRes<WriteLDHi, []> { let Latency = 4; } //===----------------------------------------------------------------------===// // Define generic 1 micro-op types -def N2Write_1cyc_1B : SchedWriteRes<[N2UnitB]> { let Latency = 1; } -def N2Write_1cyc_1I : SchedWriteRes<[N2UnitI]> { let Latency = 1; } -def N2Write_1cyc_1M : SchedWriteRes<[N2UnitM]> { let Latency = 1; } -def N2Write_1cyc_1M0 : SchedWriteRes<[N2UnitM0]> { let Latency = 1; } -def N2Write_1cyc_1L01 : SchedWriteRes<[N2UnitL01]> { let Latency = 1; } -def N2Write_2cyc_1M : SchedWriteRes<[N2UnitM]> { let Latency = 2; } -def N2Write_3cyc_1M : SchedWriteRes<[N2UnitM]> { let Latency = 3; } -def N2Write_2cyc_1M0 : SchedWriteRes<[N2UnitM0]> { let Latency = 2; - let ReleaseAtCycles = [2]; } -def N2Write_3cyc_1M0 : SchedWriteRes<[N2UnitM0]> { let Latency = 3; - let ReleaseAtCycles = [3]; } -def N2Write_5cyc_1M0 : SchedWriteRes<[N2UnitM0]> { let Latency = 5; - let ReleaseAtCycles = [5]; } -def N2Write_12cyc_1M0 : SchedWriteRes<[N2UnitM0]> { let Latency = 12; - let ReleaseAtCycles = [12]; } -def N2Write_20cyc_1M0 : SchedWriteRes<[N2UnitM0]> { let Latency = 20; - let ReleaseAtCycles = [20]; } -def N2Write_4cyc_1L : SchedWriteRes<[N2UnitL]> { let Latency = 4; } -def N2Write_6cyc_1L : SchedWriteRes<[N2UnitL]> { let Latency = 6; } -def N2Write_2cyc_1V : SchedWriteRes<[N2UnitV]> { let Latency = 2; } -def N2Write_3cyc_1V : SchedWriteRes<[N2UnitV]> { let Latency = 3; } -def N2Write_4cyc_1V : SchedWriteRes<[N2UnitV]> { let Latency = 4; } -def N2Write_5cyc_1V : SchedWriteRes<[N2UnitV]> { let Latency = 5; } -def N2Write_12cyc_1V : SchedWriteRes<[N2UnitV]> { let Latency = 12; } -def N2Write_2cyc_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 2; } -def N2Write_3cyc_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 3; } -def N2Write_4cyc_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 4; } -def N2Write_7cyc_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 7; - let ReleaseAtCycles = [7]; } -def N2Write_9cyc_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 9; } -def N2Write_10cyc_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 10; } -def N2Write_12cyc_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 12; } -def N2Write_13cyc_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 13; } -def N2Write_15cyc_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 15; } -def N2Write_16cyc_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 16; } -def N2Write_20cyc_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 20; } -def N2Write_2cyc_1V1 : SchedWriteRes<[N2UnitV1]> { let Latency = 2; } -def N2Write_3cyc_1V1 : SchedWriteRes<[N2UnitV1]> { let Latency = 3; } -def N2Write_4cyc_1V1 : SchedWriteRes<[N2UnitV1]> { let Latency = 4; } -def N2Write_6cyc_1V1 : SchedWriteRes<[N2UnitV1]> { let Latency = 6; } -def N2Write_10cyc_1V1 : SchedWriteRes<[N2UnitV1]> { let Latency = 10; } -def N2Write_6cyc_1L01 : SchedWriteRes<[N2UnitL01]> { let Latency = 6; } +def N2Write_1c_1B : 
SchedWriteRes<[N2UnitB]> { let Latency = 1; } +def N2Write_1c_1I : SchedWriteRes<[N2UnitI]> { let Latency = 1; } +def N2Write_1c_1M : SchedWriteRes<[N2UnitM]> { let Latency = 1; } +def N2Write_1c_1M0 : SchedWriteRes<[N2UnitM0]> { let Latency = 1; } +def N2Write_1c_1L01 : SchedWriteRes<[N2UnitL01]> { let Latency = 1; } +def N2Write_2c_1M : SchedWriteRes<[N2UnitM]> { let Latency = 2; } +def N2Write_3c_1M : SchedWriteRes<[N2UnitM]> { let Latency = 3; } +def N2Write_2c_1M0 : SchedWriteRes<[N2UnitM0]> { let Latency = 2; + let ReleaseAtCycles = [2]; } +def N2Write_3c_1M0 : SchedWriteRes<[N2UnitM0]> { let Latency = 3; + let ReleaseAtCycles = [3]; } +def N2Write_5c_1M0 : SchedWriteRes<[N2UnitM0]> { let Latency = 5; + let ReleaseAtCycles = [5]; } +def N2Write_12c_1M0 : SchedWriteRes<[N2UnitM0]> { let Latency = 12; + let ReleaseAtCycles = [12]; } +def N2Write_20c_1M0 : SchedWriteRes<[N2UnitM0]> { let Latency = 20; + let ReleaseAtCycles = [20]; } +def N2Write_4c_1L : SchedWriteRes<[N2UnitL]> { let Latency = 4; } +def N2Write_6c_1L : SchedWriteRes<[N2UnitL]> { let Latency = 6; } +def N2Write_2c_1V : SchedWriteRes<[N2UnitV]> { let Latency = 2; } +def N2Write_3c_1V : SchedWriteRes<[N2UnitV]> { let Latency = 3; } +def N2Write_4c_1V : SchedWriteRes<[N2UnitV]> { let Latency = 4; } +def N2Write_5c_1V : SchedWriteRes<[N2UnitV]> { let Latency = 5; } +def N2Write_12c_1V : SchedWriteRes<[N2UnitV]> { let Latency = 12; } +def N2Write_2c_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 2; } +def N2Write_3c_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 3; } +def N2Write_4c_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 4; } +def N2Write_7c_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 7; + let ReleaseAtCycles = [7]; } +def N2Write_9c_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 9; } +def N2Write_10c_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 10; } +def N2Write_12c_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 12; } +def N2Write_13c_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 13; } +def N2Write_15c_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 15; } +def N2Write_16c_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 16; } +def N2Write_20c_1V0 : SchedWriteRes<[N2UnitV0]> { let Latency = 20; } +def N2Write_2c_1V1 : SchedWriteRes<[N2UnitV1]> { let Latency = 2; } +def N2Write_3c_1V1 : SchedWriteRes<[N2UnitV1]> { let Latency = 3; } +def N2Write_4c_1V1 : SchedWriteRes<[N2UnitV1]> { let Latency = 4; } +def N2Write_6c_1V1 : SchedWriteRes<[N2UnitV1]> { let Latency = 6; } +def N2Write_10c_1V1 : SchedWriteRes<[N2UnitV1]> { let Latency = 10; } +def N2Write_6c_1L01 : SchedWriteRes<[N2UnitL01]> { let Latency = 6; } //===----------------------------------------------------------------------===// // Define generic 2 micro-op types -def N2Write_1cyc_1B_1S : SchedWriteRes<[N2UnitB, N2UnitS]> { +def N2Write_1c_1B_1S : SchedWriteRes<[N2UnitB, N2UnitS]> { let Latency = 1; let NumMicroOps = 2; } -def N2Write_6cyc_1M0_1B : SchedWriteRes<[N2UnitM0, N2UnitB]> { +def N2Write_6c_1M0_1B : SchedWriteRes<[N2UnitM0, N2UnitB]> { let Latency = 6; let NumMicroOps = 2; } -def N2Write_9cyc_1M0_1L : SchedWriteRes<[N2UnitM0, N2UnitL]> { +def N2Write_9c_1M0_1L : SchedWriteRes<[N2UnitM0, N2UnitL]> { let Latency = 9; let NumMicroOps = 2; } -def N2Write_3cyc_1I_1M : SchedWriteRes<[N2UnitI, N2UnitM]> { +def N2Write_3c_1I_1M : SchedWriteRes<[N2UnitI, N2UnitM]> { let Latency = 3; let NumMicroOps = 2; } -def N2Write_4cyc_1I_1L : SchedWriteRes<[N2UnitI, N2UnitL]> { +def N2Write_4c_1I_1L : SchedWriteRes<[N2UnitI, N2UnitL]> { let Latency = 4; let 
NumMicroOps = 2; } -def N2Write_5cyc_1I_1L : SchedWriteRes<[N2UnitI, N2UnitL]> { +def N2Write_5c_1I_1L : SchedWriteRes<[N2UnitI, N2UnitL]> { let Latency = 5; let NumMicroOps = 2; } -def N2Write_6cyc_1I_1L : SchedWriteRes<[N2UnitI, N2UnitL]> { +def N2Write_6c_1I_1L : SchedWriteRes<[N2UnitI, N2UnitL]> { let Latency = 6; let NumMicroOps = 2; } -def N2Write_7cyc_1I_1L : SchedWriteRes<[N2UnitI, N2UnitL]> { +def N2Write_7c_1I_1L : SchedWriteRes<[N2UnitI, N2UnitL]> { let Latency = 7; let NumMicroOps = 2; } -def N2Write_1cyc_1L01_1D : SchedWriteRes<[N2UnitL01, N2UnitD]> { +def N2Write_1c_1L01_1D : SchedWriteRes<[N2UnitL01, N2UnitD]> { let Latency = 1; let NumMicroOps = 2; } -def N2Write_5cyc_1M0_1V : SchedWriteRes<[N2UnitM0, N2UnitV]> { +def N2Write_5c_1M0_1V : SchedWriteRes<[N2UnitM0, N2UnitV]> { let Latency = 5; let NumMicroOps = 2; } -def N2Write_2cyc_1L01_1V : SchedWriteRes<[N2UnitL01, N2UnitV]> { +def N2Write_2c_1L01_1V : SchedWriteRes<[N2UnitL01, N2UnitV]> { let Latency = 2; let NumMicroOps = 2; } -def N2Write_4cyc_1V1_1V : SchedWriteRes<[N2UnitV1, N2UnitV]> { +def N2Write_4c_1V1_1V : SchedWriteRes<[N2UnitV1, N2UnitV]> { let Latency = 4; let NumMicroOps = 2; } -def N2Write_4cyc_2V0 : SchedWriteRes<[N2UnitV0, N2UnitV0]> { +def N2Write_4c_2V0 : SchedWriteRes<[N2UnitV0, N2UnitV0]> { let Latency = 4; let NumMicroOps = 2; } -def N2Write_10cyc_2V0 : SchedWriteRes<[N2UnitV0, N2UnitV0]> { +def N2Write_10c_2V0 : SchedWriteRes<[N2UnitV0, N2UnitV0]> { let Latency = 10; let NumMicroOps = 2; let ReleaseAtCycles = [5, 5]; } -def N2Write_13cyc_2V0 : SchedWriteRes<[N2UnitV0, N2UnitV0]> { +def N2Write_13c_2V0 : SchedWriteRes<[N2UnitV0, N2UnitV0]> { let Latency = 13; let NumMicroOps = 2; let ReleaseAtCycles = [6, 7]; } -def N2Write_15cyc_2V0 : SchedWriteRes<[N2UnitV0, N2UnitV0]> { +def N2Write_15c_2V0 : SchedWriteRes<[N2UnitV0, N2UnitV0]> { let Latency = 15; let NumMicroOps = 2; let ReleaseAtCycles = [7, 8]; } -def N2Write_16cyc_2V0 : SchedWriteRes<[N2UnitV0, N2UnitV0]> { +def N2Write_16c_2V0 : SchedWriteRes<[N2UnitV0, N2UnitV0]> { let Latency = 16; let NumMicroOps = 2; let ReleaseAtCycles = [8, 8]; } -def N2Write_4cyc_2V : SchedWriteRes<[N2UnitV, N2UnitV]> { +def N2Write_4c_2V : SchedWriteRes<[N2UnitV, N2UnitV]> { let Latency = 4; let NumMicroOps = 2; } -def N2Write_6cyc_2V : SchedWriteRes<[N2UnitV, N2UnitV]> { +def N2Write_6c_2V : SchedWriteRes<[N2UnitV, N2UnitV]> { let Latency = 6; let NumMicroOps = 2; } -def N2Write_6cyc_2L : SchedWriteRes<[N2UnitL, N2UnitL]> { +def N2Write_6c_2L : SchedWriteRes<[N2UnitL, N2UnitL]> { let Latency = 6; let NumMicroOps = 2; } -def N2Write_8cyc_1L_1V : SchedWriteRes<[N2UnitL, N2UnitV]> { +def N2Write_8c_1L_1V : SchedWriteRes<[N2UnitL, N2UnitV]> { let Latency = 8; let NumMicroOps = 2; } -def N2Write_4cyc_1L01_1V : SchedWriteRes<[N2UnitL01, N2UnitV]> { +def N2Write_4c_1L01_1V : SchedWriteRes<[N2UnitL01, N2UnitV]> { let Latency = 4; let NumMicroOps = 2; } -def N2Write_3cyc_1M0_1M : SchedWriteRes<[N2UnitM0, N2UnitM]> { +def N2Write_3c_1M0_1M : SchedWriteRes<[N2UnitM0, N2UnitM]> { let Latency = 3; let NumMicroOps = 2; } -def N2Write_2cyc_1M0_1M : SchedWriteRes<[N2UnitM0, N2UnitM]> { +def N2Write_2c_1M0_1M : SchedWriteRes<[N2UnitM0, N2UnitM]> { let Latency = 2; let NumMicroOps = 2; } -def N2Write_6cyc_2V1 : SchedWriteRes<[N2UnitV1, N2UnitV1]> { +def N2Write_6c_2V1 : SchedWriteRes<[N2UnitV1, N2UnitV1]> { let Latency = 6; let NumMicroOps = 2; } -def N2Write_4cyc_1V0_1M : SchedWriteRes<[N2UnitV0, N2UnitM]> { +def N2Write_4c_1V0_1M : SchedWriteRes<[N2UnitV0, N2UnitM]> { let Latency = 
4; let NumMicroOps = 2; } -def N2Write_5cyc_2V0 : SchedWriteRes<[N2UnitV0, N2UnitV0]> { +def N2Write_5c_2V0 : SchedWriteRes<[N2UnitV0, N2UnitV0]> { let Latency = 5; let NumMicroOps = 2; } -def N2Write_5cyc_1V1_1M0 : SchedWriteRes<[N2UnitV1, N2UnitM0]> { +def N2Write_5c_1V1_1M0 : SchedWriteRes<[N2UnitV1, N2UnitM0]> { let Latency = 5; let NumMicroOps = 2; } -def N2Write_7cyc_1M0_1V0 : SchedWriteRes<[N2UnitM0, N2UnitV0]> { +def N2Write_7c_1M0_1V0 : SchedWriteRes<[N2UnitM0, N2UnitV0]> { let Latency = 7; let NumMicroOps = 2; } -def N2Write_2cyc_1V0_1M : SchedWriteRes<[N2UnitV0, N2UnitM]> { +def N2Write_2c_1V0_1M : SchedWriteRes<[N2UnitV0, N2UnitM]> { let Latency = 2; let NumMicroOps = 2; } -def N2Write_6cyc_1V_1V1 : SchedWriteRes<[N2UnitV, N2UnitV1]> { +def N2Write_6c_1V_1V1 : SchedWriteRes<[N2UnitV, N2UnitV1]> { let Latency = 6; let NumMicroOps = 2; } -def N2Write_6cyc_1L_1M : SchedWriteRes<[N2UnitL, N2UnitM]> { +def N2Write_6c_1L_1M : SchedWriteRes<[N2UnitL, N2UnitM]> { let Latency = 6; let NumMicroOps = 2; } -def N2Write_6cyc_1L_1S : SchedWriteRes<[N2UnitL, N2UnitS]> { +def N2Write_6c_1L_1S : SchedWriteRes<[N2UnitL, N2UnitS]> { let Latency = 6; let NumMicroOps = 2; } -def N2Write_9cyc_1L_1V : SchedWriteRes<[N2UnitL, N2UnitV]> { +def N2Write_9c_1L_1V : SchedWriteRes<[N2UnitL, N2UnitV]> { let Latency = 9; let NumMicroOps = 2; } -def N2Write_4cyc_2V1 : SchedWriteRes<[N2UnitV1, N2UnitV1]> { +def N2Write_4c_2V1 : SchedWriteRes<[N2UnitV1, N2UnitV1]> { let Latency = 4; let NumMicroOps = 2; } @@ -302,52 +302,52 @@ def N2Write_4cyc_2V1 : SchedWriteRes<[N2UnitV1, N2UnitV1]> { //===----------------------------------------------------------------------===// // Define generic 3 micro-op types -def N2Write_1cyc_1L01_1D_1I : SchedWriteRes<[N2UnitL01, N2UnitD, N2UnitI]> { +def N2Write_1c_1L01_1D_1I : SchedWriteRes<[N2UnitL01, N2UnitD, N2UnitI]> { let Latency = 1; let NumMicroOps = 3; } -def N2Write_2cyc_1L01_1V_1I : SchedWriteRes<[N2UnitL01, N2UnitV, N2UnitI]> { +def N2Write_2c_1L01_1V_1I : SchedWriteRes<[N2UnitL01, N2UnitV, N2UnitI]> { let Latency = 2; let NumMicroOps = 3; } -def N2Write_2cyc_1L01_2V : SchedWriteRes<[N2UnitL01, N2UnitV, N2UnitV]> { +def N2Write_2c_1L01_2V : SchedWriteRes<[N2UnitL01, N2UnitV, N2UnitV]> { let Latency = 2; let NumMicroOps = 3; } -def N2Write_7cyc_1M_1M0_1V : SchedWriteRes<[N2UnitM, N2UnitM0, N2UnitV]> { +def N2Write_7c_1M_1M0_1V : SchedWriteRes<[N2UnitM, N2UnitM0, N2UnitV]> { let Latency = 7; let NumMicroOps = 3; } -def N2Write_8cyc_1M0_1V1_1V : SchedWriteRes<[N2UnitM0, N2UnitV1, N2UnitV]> { +def N2Write_8c_1M0_1V1_1V : SchedWriteRes<[N2UnitM0, N2UnitV1, N2UnitV]> { let Latency = 8; let NumMicroOps = 3; } -def N2Write_10cyc_1V_1L_1S : SchedWriteRes<[N2UnitV, N2UnitL, N2UnitL]> { +def N2Write_10c_1V_1L_1S : SchedWriteRes<[N2UnitV, N2UnitL, N2UnitL]> { let Latency = 10; let NumMicroOps = 3; } -def N2Write_2cyc_1L01_1S_1V : SchedWriteRes<[N2UnitL01, N2UnitS, N2UnitV]> { +def N2Write_2c_1L01_1S_1V : SchedWriteRes<[N2UnitL01, N2UnitS, N2UnitV]> { let Latency = 2; let NumMicroOps = 3; } -def N2Write_4cyc_1L01_1S_1V : SchedWriteRes<[N2UnitL01, N2UnitS, N2UnitV]> { +def N2Write_4c_1L01_1S_1V : SchedWriteRes<[N2UnitL01, N2UnitS, N2UnitV]> { let Latency = 4; let NumMicroOps = 3; } -def N2Write_6cyc_3L : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitL]> { +def N2Write_6c_3L : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitL]> { let Latency = 6; let NumMicroOps = 3; } -def N2Write_8cyc_1L_2V : SchedWriteRes<[N2UnitL, N2UnitV, N2UnitV]> { +def N2Write_8c_1L_2V : SchedWriteRes<[N2UnitL, N2UnitV, 
N2UnitV]> { let Latency = 8; let NumMicroOps = 3; } @@ -355,102 +355,102 @@ def N2Write_8cyc_1L_2V : SchedWriteRes<[N2UnitL, N2UnitV, N2UnitV]> { //===----------------------------------------------------------------------===// // Define generic 4 micro-op types -def N2Write_2cyc_1L01_2V_1I : SchedWriteRes<[N2UnitL01, N2UnitV, N2UnitV, - N2UnitI]> { +def N2Write_2c_1L01_2V_1I : SchedWriteRes<[N2UnitL01, N2UnitV, N2UnitV, + N2UnitI]> { let Latency = 2; let NumMicroOps = 4; } -def N2Write_6cyc_4V0 : SchedWriteRes<[N2UnitV0, N2UnitV0, N2UnitV0, N2UnitV0]> { +def N2Write_6c_4V0 : SchedWriteRes<[N2UnitV0, N2UnitV0, N2UnitV0, N2UnitV0]> { let Latency = 6; let NumMicroOps = 4; } -def N2Write_4cyc_4V : SchedWriteRes<[N2UnitV, N2UnitV, N2UnitV, N2UnitV]> { +def N2Write_4c_4V : SchedWriteRes<[N2UnitV, N2UnitV, N2UnitV, N2UnitV]> { let Latency = 4; let NumMicroOps = 4; } -def N2Write_6cyc_4V : SchedWriteRes<[N2UnitV, N2UnitV, N2UnitV, N2UnitV]> { +def N2Write_6c_4V : SchedWriteRes<[N2UnitV, N2UnitV, N2UnitV, N2UnitV]> { let Latency = 6; let NumMicroOps = 4; } -def N2Write_8cyc_2L_2V : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitV, N2UnitV]> { +def N2Write_8c_2L_2V : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitV, N2UnitV]> { let Latency = 8; let NumMicroOps = 4; } -def N2Write_9cyc_2L_2V : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitV, N2UnitV]> { +def N2Write_9c_2L_2V : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitV, N2UnitV]> { let Latency = 9; let NumMicroOps = 4; } -def N2Write_2cyc_2L01_2V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitV, - N2UnitV]> { +def N2Write_2c_2L01_2V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitV, + N2UnitV]> { let Latency = 2; let NumMicroOps = 4; } -def N2Write_4cyc_2L01_2V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitV, - N2UnitV]> { +def N2Write_4c_2L01_2V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitV, + N2UnitV]> { let Latency = 4; let NumMicroOps = 4; } -def N2Write_5cyc_2L01_2V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitV, - N2UnitV]> { +def N2Write_5c_2L01_2V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitV, + N2UnitV]> { let Latency = 5; let NumMicroOps = 4; } -def N2Write_8cyc_2M0_2V0 : SchedWriteRes<[N2UnitM0, N2UnitM0, N2UnitV0, - N2UnitV0]> { +def N2Write_8c_2M0_2V0 : SchedWriteRes<[N2UnitM0, N2UnitM0, N2UnitV0, + N2UnitV0]> { let Latency = 8; let NumMicroOps = 4; } -def N2Write_11cyc_2V_2V1 : SchedWriteRes<[N2UnitV, N2UnitV, N2UnitV1, - N2UnitV1]> { +def N2Write_11c_2V_2V1 : SchedWriteRes<[N2UnitV, N2UnitV, N2UnitV1, + N2UnitV1]> { let Latency = 11; let NumMicroOps = 4; } -def N2Write_9cyc_2V_2V1 : SchedWriteRes<[N2UnitV, N2UnitV, N2UnitV1, - N2UnitV1]> { +def N2Write_9c_2V_2V1 : SchedWriteRes<[N2UnitV, N2UnitV, N2UnitV1, + N2UnitV1]> { let Latency = 9; let NumMicroOps = 4; } -def N2Write_8cyc_2V_2V1 : SchedWriteRes<[N2UnitV, N2UnitV, N2UnitV1, - N2UnitV1]> { +def N2Write_8c_2V_2V1 : SchedWriteRes<[N2UnitV, N2UnitV, N2UnitV1, + N2UnitV1]> { let Latency = 8; let NumMicroOps = 4; } -def N2Write_10cyc_2L_2V1 : SchedWriteRes<[N2UnitV, N2UnitV, N2UnitV1, - N2UnitV1]> { +def N2Write_10c_2L_2V1 : SchedWriteRes<[N2UnitV, N2UnitV, N2UnitV1, + N2UnitV1]> { let Latency = 10; let NumMicroOps = 4; } -def N2Write_10cyc_2L_2V : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitV, N2UnitV]> { +def N2Write_10c_2L_2V : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitV, N2UnitV]> { let Latency = 10; let NumMicroOps = 4; } -def N2Write_4cyc_2M0_2M : SchedWriteRes<[N2UnitM0, N2UnitM0, N2UnitM, - N2UnitM]> { +def N2Write_4c_2M0_2M : SchedWriteRes<[N2UnitM0, N2UnitM0, N2UnitM, + N2UnitM]> { let Latency = 4; let 
NumMicroOps = 4; } -def N2Write_6cyc_2I_2L : SchedWriteRes<[N2UnitI, N2UnitI, N2UnitL, N2UnitL]> { +def N2Write_6c_2I_2L : SchedWriteRes<[N2UnitI, N2UnitI, N2UnitL, N2UnitL]> { let Latency = 6; let NumMicroOps = 4; } -def N2Write_7cyc_4L : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitL, N2UnitL]> { +def N2Write_7c_4L : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitL, N2UnitL]> { let Latency = 7; let NumMicroOps = 4; } @@ -458,14 +458,14 @@ def N2Write_7cyc_4L : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitL, N2UnitL]> { //===----------------------------------------------------------------------===// // Define generic 5 micro-op types -def N2Write_2cyc_1L01_2V_2I : SchedWriteRes<[N2UnitL01, N2UnitV, N2UnitV, - N2UnitI, N2UnitI]> { +def N2Write_2c_1L01_2V_2I : SchedWriteRes<[N2UnitL01, N2UnitV, N2UnitV, + N2UnitI, N2UnitI]> { let Latency = 2; let NumMicroOps = 5; } -def N2Write_8cyc_2L_3V : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitV, N2UnitV, - N2UnitV]> { +def N2Write_8c_2L_3V : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitV, N2UnitV, + N2UnitV]> { let Latency = 8; let NumMicroOps = 5; } @@ -473,32 +473,32 @@ def N2Write_8cyc_2L_3V : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitV, N2UnitV, //===----------------------------------------------------------------------===// // Define generic 6 micro-op types -def N2Write_8cyc_3L_3V : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitL, - N2UnitV, N2UnitV, N2UnitV]> { +def N2Write_8c_3L_3V : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitL, + N2UnitV, N2UnitV, N2UnitV]> { let Latency = 8; let NumMicroOps = 6; } -def N2Write_2cyc_3L01_3V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, - N2UnitV, N2UnitV, N2UnitV]> { +def N2Write_2c_3L01_3V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, + N2UnitV, N2UnitV, N2UnitV]> { let Latency = 2; let NumMicroOps = 6; } -def N2Write_6cyc_3L01_3V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, - N2UnitV, N2UnitV, N2UnitV]> { +def N2Write_6c_3L01_3V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, + N2UnitV, N2UnitV, N2UnitV]> { let Latency = 6; let NumMicroOps = 6; } -def N2Write_4cyc_3L01_3V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, - N2UnitV, N2UnitV, N2UnitV]> { +def N2Write_4c_3L01_3V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, + N2UnitV, N2UnitV, N2UnitV]> { let Latency = 4; let NumMicroOps = 6; } -def N2Write_10cyc_2L_2V_2S : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitV, N2UnitV, - N2UnitS, N2UnitS]> { +def N2Write_10c_2L_2V_2S : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitV, N2UnitV, + N2UnitS, N2UnitS]> { let Latency = 10; let NumMicroOps = 6; } @@ -506,8 +506,8 @@ def N2Write_10cyc_2L_2V_2S : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitV, N2UnitV, //===----------------------------------------------------------------------===// // Define generic 7 micro-op types -def N2Write_8cyc_3L_4V : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitL, - N2UnitV, N2UnitV, N2UnitV, N2UnitV]> { +def N2Write_8c_3L_4V : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitL, + N2UnitV, N2UnitV, N2UnitV, N2UnitV]> { let Latency = 8; let NumMicroOps = 7; } @@ -515,34 +515,34 @@ def N2Write_8cyc_3L_4V : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitL, //===----------------------------------------------------------------------===// // Define generic 8 micro-op types -def N2Write_6cyc_8V : SchedWriteRes<[N2UnitV, N2UnitV, N2UnitV, N2UnitV, - N2UnitV, N2UnitV, N2UnitV, N2UnitV]> { +def N2Write_6c_8V : SchedWriteRes<[N2UnitV, N2UnitV, N2UnitV, N2UnitV, + N2UnitV, N2UnitV, N2UnitV, N2UnitV]> { let Latency = 6; let NumMicroOps = 8; } -def N2Write_2cyc_4L01_4V : SchedWriteRes<[N2UnitL01, N2UnitL01, 
N2UnitL01, - N2UnitL01, N2UnitV, N2UnitV, N2UnitV, - N2UnitV]> { +def N2Write_2c_4L01_4V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, + N2UnitL01, N2UnitV, N2UnitV, N2UnitV, + N2UnitV]> { let Latency = 2; let NumMicroOps = 8; } -def N2Write_5cyc_4L01_4V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, - N2UnitL01, N2UnitV, N2UnitV, N2UnitV, - N2UnitV]> { +def N2Write_5c_4L01_4V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, + N2UnitL01, N2UnitV, N2UnitV, N2UnitV, + N2UnitV]> { let Latency = 5; let NumMicroOps = 8; } -def N2Write_8cyc_4L_4V : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitL, N2UnitL, - N2UnitV, N2UnitV, N2UnitV, N2UnitV]> { +def N2Write_8c_4L_4V : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitL, N2UnitL, + N2UnitV, N2UnitV, N2UnitV, N2UnitV]> { let Latency = 8; let NumMicroOps = 8; } -def N2Write_9cyc_4L_4V : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitL, N2UnitL, - N2UnitV, N2UnitV, N2UnitV, N2UnitV]> { +def N2Write_9c_4L_4V : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitL, N2UnitL, + N2UnitV, N2UnitV, N2UnitV, N2UnitV]> { let Latency = 9; let NumMicroOps = 8; } @@ -550,9 +550,9 @@ def N2Write_9cyc_4L_4V : SchedWriteRes<[N2UnitL, N2UnitL, N2UnitL, N2UnitL, //===----------------------------------------------------------------------===// // Define generic 10 micro-op types -def N2Write_7cyc_5L01_5V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, - N2UnitL01, N2UnitL01, N2UnitV, - N2UnitV, N2UnitV, N2UnitV, N2UnitV]> { +def N2Write_7c_5L01_5V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, + N2UnitL01, N2UnitL01, N2UnitV, + N2UnitV, N2UnitV, N2UnitV, N2UnitV]> { let Latency = 7; let NumMicroOps = 10; } @@ -560,10 +560,10 @@ def N2Write_7cyc_5L01_5V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, //===----------------------------------------------------------------------===// // Define generic 12 micro-op types -def N2Write_7cyc_6L01_6V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, - N2UnitL01, N2UnitL01, N2UnitL01, - N2UnitV, N2UnitV, N2UnitV, N2UnitV, - N2UnitV, N2UnitV]> { +def N2Write_7c_6L01_6V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, + N2UnitL01, N2UnitL01, N2UnitL01, + N2UnitV, N2UnitV, N2UnitV, N2UnitV, + N2UnitV, N2UnitV]> { let Latency = 7; let NumMicroOps = 12; } @@ -571,11 +571,11 @@ def N2Write_7cyc_6L01_6V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, //===----------------------------------------------------------------------===// // Define generic 15 micro-op types -def N2Write_7cyc_5L01_5S_5V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, - N2UnitL01, N2UnitL01, N2UnitS, - N2UnitS, N2UnitS, N2UnitS, - N2UnitS, N2UnitV, N2UnitV, - N2UnitV, N2UnitV, N2UnitV]> { +def N2Write_7c_5L01_5S_5V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, + N2UnitL01, N2UnitL01, N2UnitS, + N2UnitS, N2UnitS, N2UnitS, + N2UnitS, N2UnitV, N2UnitV, + N2UnitV, N2UnitV, N2UnitV]> { let Latency = 7; let NumMicroOps = 15; } @@ -583,12 +583,12 @@ def N2Write_7cyc_5L01_5S_5V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, //===----------------------------------------------------------------------===// // Define generic 18 micro-op types -def N2Write_11cyc_9L01_9V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, - N2UnitL01, N2UnitL01, N2UnitL01, - N2UnitL01, N2UnitL01, N2UnitL01, - N2UnitV, N2UnitV, N2UnitV, - N2UnitV, N2UnitV, N2UnitV, - N2UnitV, N2UnitV, N2UnitV]> { +def N2Write_11c_9L01_9V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, + N2UnitL01, N2UnitL01, N2UnitL01, + N2UnitL01, N2UnitL01, N2UnitL01, + N2UnitV, N2UnitV, N2UnitV, + N2UnitV, N2UnitV, N2UnitV, + N2UnitV, 
N2UnitV, N2UnitV]> { let Latency = 11; let NumMicroOps = 18; } @@ -596,15 +596,15 @@ def N2Write_11cyc_9L01_9V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, //===----------------------------------------------------------------------===// // Define generic 27 micro-op types -def N2Write_11cyc_9L01_9S_9V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, - N2UnitL01, N2UnitL01, N2UnitL01, - N2UnitL01, N2UnitL01, N2UnitL01, - N2UnitS, N2UnitS, N2UnitS, - N2UnitS, N2UnitS, N2UnitS, - N2UnitS, N2UnitS, N2UnitS, - N2UnitV, N2UnitV, N2UnitV, - N2UnitV, N2UnitV, N2UnitV, - N2UnitV, N2UnitV, N2UnitV]> { +def N2Write_11c_9L01_9S_9V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, + N2UnitL01, N2UnitL01, N2UnitL01, + N2UnitL01, N2UnitL01, N2UnitL01, + N2UnitS, N2UnitS, N2UnitS, + N2UnitS, N2UnitS, N2UnitS, + N2UnitS, N2UnitS, N2UnitS, + N2UnitV, N2UnitV, N2UnitV, + N2UnitV, N2UnitV, N2UnitV, + N2UnitV, N2UnitV, N2UnitV]> { let Latency = 11; let NumMicroOps = 27; } @@ -612,12 +612,12 @@ def N2Write_11cyc_9L01_9S_9V : SchedWriteRes<[N2UnitL01, N2UnitL01, N2UnitL01, //===----------------------------------------------------------------------===// // Define types for arithmetic and logical ops with short shifts def N2Write_Arith : SchedWriteVariant<[ - SchedVar<IsCheapLSL, [N2Write_1cyc_1I]>, - SchedVar<NoSchedPred, [N2Write_2cyc_1M]>]>; + SchedVar<IsCheapLSL, [N2Write_1c_1I]>, + SchedVar<NoSchedPred, [N2Write_2c_1M]>]>; def N2Write_Logical: SchedWriteVariant<[ - SchedVar<NeoverseNoLSL, [N2Write_1cyc_1I]>, - SchedVar<NoSchedPred, [N2Write_2cyc_1M]>]>; + SchedVar<NeoverseNoLSL, [N2Write_1c_1I]>, + SchedVar<NoSchedPred, [N2Write_2c_1M]>]>; // Miscellaneous // ----------------------------------------------------------------------------- @@ -629,24 +629,24 @@ def : InstRW<[WriteI], (instrs COPY)>; // Branch, immed // Compare and branch -def : SchedAlias<WriteBr, N2Write_1cyc_1B>; +def : SchedAlias<WriteBr, N2Write_1c_1B>; // Branch, register -def : SchedAlias<WriteBrReg, N2Write_1cyc_1B>; +def : SchedAlias<WriteBrReg, N2Write_1c_1B>; // Branch and link, immed // Branch and link, register -def : InstRW<[N2Write_1cyc_1B_1S], (instrs BL, BLR)>; +def : InstRW<[N2Write_1c_1B_1S], (instrs BL, BLR)>; // Arithmetic and Logical Instructions // ----------------------------------------------------------------------------- // ALU, basic // ALU, basic, flagset -def : SchedAlias<WriteI, N2Write_1cyc_1I>; +def : SchedAlias<WriteI, N2Write_1c_1I>; // ALU, extend and shift -def : SchedAlias<WriteIEReg, N2Write_2cyc_1M>; +def : SchedAlias<WriteIEReg, N2Write_2c_1M>; // Arithmetic, LSL shift, shift <= 4 // Arithmetic, flagset, LSL shift, shift <= 4 @@ -654,44 +654,44 @@ def : SchedAlias<WriteIEReg, N2Write_2cyc_1M>; def : SchedAlias<WriteISReg, N2Write_Arith>; // Logical, shift, no flagset -def : InstRW<[N2Write_1cyc_1I], +def : InstRW<[N2Write_1c_1I], (instregex "^(AND|BIC|EON|EOR|ORN|ORR)[WX]rs$")>; // Logical, shift, flagset def : InstRW<[N2Write_Logical], (instregex "^(AND|BIC)S[WX]rs$")>; // Arithmetic, immediate to logical address tag -def : InstRW<[N2Write_2cyc_1M], (instrs ADDG, SUBG)>; +def : InstRW<[N2Write_2c_1M], (instrs ADDG, SUBG)>; // Convert floating-point condition flags // Flag manipulation instructions def : WriteRes<WriteSys, []> { let Latency = 1; } // Insert Random Tags -def : InstRW<[N2Write_2cyc_1M], (instrs IRG, IRGstack)>; +def : InstRW<[N2Write_2c_1M], (instrs IRG, IRGstack)>; // Insert Tag Mask // Subtract Pointer // Subtract Pointer, flagset -def : InstRW<[N2Write_1cyc_1I], (instrs GMI, SUBP, 
SUBPS)>; +def : InstRW<[N2Write_1c_1I], (instrs GMI, SUBP, SUBPS)>; // Move and shift instructions // ----------------------------------------------------------------------------- -def : SchedAlias<WriteImm, N2Write_1cyc_1I>; +def : SchedAlias<WriteImm, N2Write_1c_1I>; // Divide and Multiply Instructions // ----------------------------------------------------------------------------- // SDIV, UDIV -def : SchedAlias<WriteID32, N2Write_12cyc_1M0>; -def : SchedAlias<WriteID64, N2Write_20cyc_1M0>; +def : SchedAlias<WriteID32, N2Write_12c_1M0>; +def : SchedAlias<WriteID64, N2Write_20c_1M0>; def : WriteRes<WriteIM32, [N2UnitM]> { let Latency = 2; } def : WriteRes<WriteIM64, [N2UnitM]> { let Latency = 2; } // Multiply high -def : InstRW<[N2Write_3cyc_1M], (instrs SMULHrr, UMULHrr)>; +def : InstRW<[N2Write_3c_1M], (instrs SMULHrr, UMULHrr)>; // Pointer Authentication Instructions (v8.3 PAC) // ----------------------------------------------------------------------------- @@ -701,21 +701,21 @@ def : InstRW<[N2Write_3cyc_1M], (instrs SMULHrr, UMULHrr)>; // Compute pointer authentication code for data address // Compute pointer authentication code, using generic key // Compute pointer authentication code for instruction address -def : InstRW<[N2Write_5cyc_1M0], (instregex "^AUT", "^PAC")>; +def : InstRW<[N2Write_5c_1M0], (instregex "^AUT", "^PAC")>; // Branch and link, register, with pointer authentication // Branch, register, with pointer authentication // Branch, return, with pointer authentication -def : InstRW<[N2Write_6cyc_1M0_1B], (instrs BLRAA, BLRAAZ, BLRAB, BLRABZ, BRAA, - BRAAZ, BRAB, BRABZ, RETAA, RETAB, - ERETAA, ERETAB)>; +def : InstRW<[N2Write_6c_1M0_1B], (instrs BLRAA, BLRAAZ, BLRAB, BLRABZ, BRAA, + BRAAZ, BRAB, BRABZ, RETAA, RETAB, + ERETAA, ERETAB)>; // Load register, with pointer authentication -def : InstRW<[N2Write_9cyc_1M0_1L], (instregex "^LDRA[AB](indexed|writeback)")>; +def : InstRW<[N2Write_9c_1M0_1L], (instregex "^LDRA[AB](indexed|writeback)")>; // Strip pointer authentication code -def : InstRW<[N2Write_2cyc_1M0], (instrs XPACD, XPACI, XPACLRI)>; +def : InstRW<[N2Write_2c_1M0], (instrs XPACD, XPACI, XPACLRI)>; // Miscellaneous data-processing instructions // ----------------------------------------------------------------------------- @@ -724,41 +724,41 @@ def : InstRW<[N2Write_2cyc_1M0], (instrs XPACD, XPACI, XPACLRI)>; // Bitfield extract, two regs // NOTE: We don't model the difference between EXTR where both operands are the // same (one reg). 
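// (For example, "EXTR X0, X1, X1, #8" is the ROR-immediate alias and may be
// cheaper in hardware, but it is costed here like the two-register form.)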
-def : SchedAlias<WriteExtr, N2Write_3cyc_1I_1M>; -def : InstRW<[N2Write_3cyc_1I_1M], (instrs EXTRWrri, EXTRXrri)>; +def : SchedAlias<WriteExtr, N2Write_3c_1I_1M>; +def : InstRW<[N2Write_3c_1I_1M], (instrs EXTRWrri, EXTRXrri)>; // Bitfield move, basic -def : SchedAlias<WriteIS, N2Write_1cyc_1I>; +def : SchedAlias<WriteIS, N2Write_1c_1I>; // Bitfield move, insert -def : InstRW<[N2Write_2cyc_1M], (instregex "^BFM[WX]ri$")>; +def : InstRW<[N2Write_2c_1M], (instregex "^BFM[WX]ri$")>; // Load instructions // ----------------------------------------------------------------------------- -def : SchedAlias<WriteLD, N2Write_4cyc_1L>; -def : SchedAlias<WriteLDIdx, N2Write_4cyc_1I_1L>; +def : SchedAlias<WriteLD, N2Write_4c_1L>; +def : SchedAlias<WriteLDIdx, N2Write_4c_1I_1L>; // Load pair, signed immed offset, signed words -def : InstRW<[N2Write_5cyc_1M0, WriteLDHi], (instrs LDPSWi)>; +def : InstRW<[N2Write_5c_1M0, WriteLDHi], (instrs LDPSWi)>; // Load pair, immed post-index or immed pre-index, signed words -def : InstRW<[WriteAdr, N2Write_5cyc_1M0, WriteLDHi], +def : InstRW<[WriteAdr, N2Write_5c_1M0, WriteLDHi], (instregex "^LDPSW(post|pre)$")>; // Store instructions // ----------------------------------------------------------------------------- -def : SchedAlias<WriteST, N2Write_1cyc_1L01_1D>; -def : SchedAlias<WriteSTIdx, N2Write_1cyc_1L01_1D_1I>; -def : SchedAlias<WriteSTP, N2Write_1cyc_1L01_1D>; -def : SchedAlias<WriteAdr, N2Write_1cyc_1I>; // copied from A57. +def : SchedAlias<WriteST, N2Write_1c_1L01_1D>; +def : SchedAlias<WriteSTIdx, N2Write_1c_1L01_1D_1I>; +def : SchedAlias<WriteSTP, N2Write_1c_1L01_1D>; +def : SchedAlias<WriteAdr, N2Write_1c_1I>; // copied from A57. // Tag load instructions // ----------------------------------------------------------------------------- // Load allocation tag // Load multiple allocation tags -def : InstRW<[N2Write_4cyc_1L], (instrs LDG, LDGM)>; +def : InstRW<[N2Write_4c_1L], (instrs LDG, LDGM)>; // Tag store instructions // ----------------------------------------------------------------------------- @@ -769,18 +769,18 @@ def : InstRW<[N2Write_4cyc_1L], (instrs LDG, LDGM)>; // Store Allocation Tag to one or two granules, zeroing, pre-index // Store allocation tag and reg pair to memory, post-Index // Store allocation tag and reg pair to memory, pre-Index -def : InstRW<[N2Write_1cyc_1L01_1D_1I], (instrs STGPreIndex, STGPostIndex, - ST2GPreIndex, ST2GPostIndex, - STZGPreIndex, STZGPostIndex, - STZ2GPreIndex, STZ2GPostIndex, - STGPpre, STGPpost)>; +def : InstRW<[N2Write_1c_1L01_1D_1I], (instrs STGPreIndex, STGPostIndex, + ST2GPreIndex, ST2GPostIndex, + STZGPreIndex, STZGPostIndex, + STZ2GPreIndex, STZ2GPostIndex, + STGPpre, STGPpost)>; // Store allocation tags to one or two granules, signed offset // Store allocation tag to two granules, zeroing, signed offset // Store allocation tag and reg pair to memory, signed offset // Store multiple allocation tags -def : InstRW<[N2Write_1cyc_1L01_1D], (instrs STGi, ST2Gi, STZGi, - STZ2Gi, STGPi, STGM, STZGM)>; +def : InstRW<[N2Write_1c_1L01_1D], (instrs STGi, ST2Gi, STZGi, + STZ2Gi, STGPi, STGM, STZGM)>; // FP data processing instructions // ----------------------------------------------------------------------------- @@ -790,124 +790,124 @@ def : InstRW<[N2Write_1cyc_1L01_1D], (instrs STGi, ST2Gi, STZGi, // FP min/max // FP negate // FP select -def : SchedAlias<WriteF, N2Write_2cyc_1V>; +def : SchedAlias<WriteF, N2Write_2c_1V>; // FP compare -def : SchedAlias<WriteFCmp, N2Write_2cyc_1V0>; +def : SchedAlias<WriteFCmp, 
N2Write_2c_1V0>;

// FP divide, square root
-def : SchedAlias<WriteFDiv, N2Write_7cyc_1V0>;
+def : SchedAlias<WriteFDiv, N2Write_7c_1V0>;

// FP divide, H-form
-def : InstRW<[N2Write_7cyc_1V0], (instrs FDIVHrr)>;
+def : InstRW<[N2Write_7c_1V0], (instrs FDIVHrr)>;

// FP divide, S-form
-def : InstRW<[N2Write_10cyc_1V0], (instrs FDIVSrr)>;
+def : InstRW<[N2Write_10c_1V0], (instrs FDIVSrr)>;

// FP divide, D-form
-def : InstRW<[N2Write_15cyc_1V0], (instrs FDIVDrr)>;
+def : InstRW<[N2Write_15c_1V0], (instrs FDIVDrr)>;

// FP square root, H-form
-def : InstRW<[N2Write_7cyc_1V0], (instrs FSQRTHr)>;
+def : InstRW<[N2Write_7c_1V0], (instrs FSQRTHr)>;

// FP square root, S-form
-def : InstRW<[N2Write_9cyc_1V0], (instrs FSQRTSr)>;
+def : InstRW<[N2Write_9c_1V0], (instrs FSQRTSr)>;

// FP square root, D-form
-def : InstRW<[N2Write_16cyc_1V0], (instrs FSQRTDr)>;
+def : InstRW<[N2Write_16c_1V0], (instrs FSQRTDr)>;

// FP multiply
def : WriteRes<WriteFMul, [N2UnitV]> { let Latency = 3; }

// FP multiply accumulate
-def : InstRW<[N2Write_4cyc_1V], (instregex "^FN?M(ADD|SUB)[HSD]rrr$")>;
+def : InstRW<[N2Write_4c_1V], (instregex "^FN?M(ADD|SUB)[HSD]rrr$")>;

// FP round to integral
-def : InstRW<[N2Write_3cyc_1V0], (instregex "^FRINT[AIMNPXZ][HSD]r$",
-                                            "^FRINT(32|64)[XZ][SD]r$")>;
+def : InstRW<[N2Write_3c_1V0], (instregex "^FRINT[AIMNPXZ][HSD]r$",
+                                          "^FRINT(32|64)[XZ][SD]r$")>;

// FP miscellaneous instructions
// -----------------------------------------------------------------------------

// FP convert, from gen to vec reg
-def : InstRW<[N2Write_3cyc_1M0], (instregex "^[SU]CVTF[SU][WX][HSD]ri$")>;
+def : InstRW<[N2Write_3c_1M0], (instregex "^[SU]CVTF[SU][WX][HSD]ri$")>;

// FP convert, from vec to gen reg
-def : InstRW<[N2Write_3cyc_1V], (instregex "^FCVT[AMNPZ][SU][SU][WX][HSD]r$")>;
+def : InstRW<[N2Write_3c_1V], (instregex "^FCVT[AMNPZ][SU][SU][WX][HSD]r$")>;

// FP convert, Javascript from vec to gen reg
// FP convert, from vec to vec reg
-def : SchedAlias<WriteFCvt, N2Write_3cyc_1V0>;
+def : SchedAlias<WriteFCvt, N2Write_3c_1V0>;

// FP move, immed
// FP move, register
-def : SchedAlias<WriteFImm, N2Write_2cyc_1V>;
+def : SchedAlias<WriteFImm, N2Write_2c_1V>;

// FP transfer, from gen to low half of vec reg
-def : InstRW<[N2Write_3cyc_1M0], (instrs FMOVWHr, FMOVXHr, FMOVWSr, FMOVXDr,
-                                         FMOVHWr, FMOVHXr, FMOVSWr, FMOVDXr)>;
+def : InstRW<[N2Write_3c_1M0], (instrs FMOVWHr, FMOVXHr, FMOVWSr, FMOVXDr,
+                                       FMOVHWr, FMOVHXr, FMOVSWr, FMOVDXr)>;

// FP transfer, from gen to high half of vec reg
-def : InstRW<[N2Write_5cyc_1M0_1V], (instrs FMOVXDHighr)>;
+def : InstRW<[N2Write_5c_1M0_1V], (instrs FMOVXDHighr)>;

// FP transfer, from vec to gen reg
-def : SchedAlias<WriteFCopy, N2Write_2cyc_1V>;
+def : SchedAlias<WriteFCopy, N2Write_2c_1V>;

// FP load instructions
// -----------------------------------------------------------------------------

// Load vector reg, literal, S/D/Q forms
// Load vector reg, unscaled immed
-def : InstRW<[N2Write_6cyc_1L], (instregex "^LDR[SDQ]l$",
-                                           "^LDUR[BHSDQ]i$")>;
+def : InstRW<[N2Write_6c_1L], (instregex "^LDR[SDQ]l$",
+                                         "^LDUR[BHSDQ]i$")>;

// Load vector reg, immed post-index
-def : InstRW<[N2Write_6cyc_1I_1L, WriteI], (instregex "^LDR[BHSDQ]post$")>;
+def : InstRW<[N2Write_6c_1I_1L, WriteI], (instregex "^LDR[BHSDQ]post$")>;

// Load vector reg, immed pre-index
-def : InstRW<[WriteAdr, N2Write_6cyc_1I_1L], (instregex "^LDR[BHSDQ]pre$")>;
+def : InstRW<[WriteAdr, N2Write_6c_1I_1L], (instregex "^LDR[BHSDQ]pre$")>;

// Load vector reg, unsigned immed
-def : InstRW<[N2Write_6cyc_1L], (instregex "^LDR[BHSDQ]ui$")>;
+def : InstRW<[N2Write_6c_1L], (instregex "^LDR[BHSDQ]ui$")>;

// Load vector reg, register offset, basic
// Load vector reg, register offset, scale, S/D-form
// Load vector reg, register offset, extend
// Load vector reg, register offset, extend, scale, S/D-form
-def : InstRW<[N2Write_6cyc_1L, ReadAdrBase], (instregex "^LDR[BSD]ro[WX]$")>;
+def : InstRW<[N2Write_6c_1L, ReadAdrBase], (instregex "^LDR[BSD]ro[WX]$")>;

// Load vector reg, register offset, scale, H/Q-form
// Load vector reg, register offset, extend, scale, H/Q-form
-def : InstRW<[N2Write_7cyc_1I_1L, ReadAdrBase], (instregex "^LDR[HQ]ro[WX]$")>;
+def : InstRW<[N2Write_7c_1I_1L, ReadAdrBase], (instregex "^LDR[HQ]ro[WX]$")>;

// Load vector pair, immed offset, S/D-form
-def : InstRW<[N2Write_6cyc_1L, WriteLDHi], (instregex "^LDN?P[SD]i$")>;
+def : InstRW<[N2Write_6c_1L, WriteLDHi], (instregex "^LDN?P[SD]i$")>;

// Load vector pair, immed offset, Q-form
-def : InstRW<[N2Write_6cyc_2L, WriteLDHi], (instrs LDPQi, LDNPQi)>;
+def : InstRW<[N2Write_6c_2L, WriteLDHi], (instrs LDPQi, LDNPQi)>;

// Load vector pair, immed post-index, S/D-form
// Load vector pair, immed pre-index, S/D-form
-def : InstRW<[WriteAdr, N2Write_6cyc_1I_1L, WriteLDHi],
+def : InstRW<[WriteAdr, N2Write_6c_1I_1L, WriteLDHi],
              (instregex "^LDP[SD](pre|post)$")>;

// Load vector pair, immed post-index, Q-form
// Load vector pair, immed pre-index, Q-form
-def : InstRW<[WriteAdr, N2Write_6cyc_2I_2L, WriteLDHi], (instrs LDPQpost,
-                                                                LDPQpre)>;
+def : InstRW<[WriteAdr, N2Write_6c_2I_2L, WriteLDHi], (instrs LDPQpost,
+                                                              LDPQpre)>;

// FP store instructions
// -----------------------------------------------------------------------------

// Store vector reg, unscaled immed, B/H/S/D-form
// Store vector reg, unscaled immed, Q-form
-def : InstRW<[N2Write_2cyc_1L01_1V], (instregex "^STUR[BHSDQ]i$")>;
+def : InstRW<[N2Write_2c_1L01_1V], (instregex "^STUR[BHSDQ]i$")>;

// Store vector reg, immed post-index, B/H/S/D-form
// Store vector reg, immed post-index, Q-form
// Store vector reg, immed pre-index, B/H/S/D-form
// Store vector reg, immed pre-index, Q-form
-def : InstRW<[WriteAdr, N2Write_2cyc_1L01_1V_1I, ReadAdrBase],
+def : InstRW<[WriteAdr, N2Write_2c_1L01_1V_1I, ReadAdrBase],
              (instregex "^STR[BHSDQ](pre|post)$")>;

// Store vector reg, unsigned immed, B/H/S/D-form
// Store vector reg, unsigned immed, Q-form
-def : InstRW<[N2Write_2cyc_1L01_1V], (instregex "^STR[BHSDQ]ui$")>;
+def : InstRW<[N2Write_2c_1L01_1V], (instregex "^STR[BHSDQ]ui$")>;

// Store vector reg, register offset, basic, B/H/S/D-form
// Store vector reg, register offset, basic, Q-form
@@ -915,35 +915,35 @@ def : InstRW<[N2Write_2cyc_1L01_1V], (instregex "^STR[BHSDQ]ui$")>;
// Store vector reg, register offset, extend, B/H/S/D-form
// Store vector reg, register offset, extend, Q-form
// Store vector reg, register offset, extend, scale, S/D-form
-def : InstRW<[N2Write_2cyc_1L01_1V, ReadAdrBase],
+def : InstRW<[N2Write_2c_1L01_1V, ReadAdrBase],
              (instregex "^STR[BSD]ro[WX]$")>;

// Store vector reg, register offset, scale, H-form
// Store vector reg, register offset, scale, Q-form
// Store vector reg, register offset, extend, scale, H-form
// Store vector reg, register offset, extend, scale, Q-form
-def : InstRW<[N2Write_2cyc_1L01_1V, ReadAdrBase],
+def : InstRW<[N2Write_2c_1L01_1V, ReadAdrBase],
              (instregex "^STR[HQ]ro[WX]$")>;

// Store vector pair, immed offset, S-form
// Store vector pair, immed offset, D-form
-def : InstRW<[N2Write_2cyc_1L01_1V], (instregex "^STN?P[SD]i$")>;
+def : InstRW<[N2Write_2c_1L01_1V], (instregex "^STN?P[SD]i$")>;

// Store vector pair, immed offset, Q-form
-def : InstRW<[N2Write_2cyc_1L01_2V], (instrs STPQi, STNPQi)>;
+def : InstRW<[N2Write_2c_1L01_2V], (instrs STPQi, STNPQi)>;

// Store vector pair, immed post-index, S-form
// Store vector pair, immed post-index, D-form
// Store vector pair, immed pre-index, S-form
// Store vector pair, immed pre-index, D-form
-def : InstRW<[WriteAdr, N2Write_2cyc_1L01_1V_1I],
+def : InstRW<[WriteAdr, N2Write_2c_1L01_1V_1I],
              (instregex "^STP[SD](pre|post)$")>;

// Store vector pair, immed post-index, Q-form
-def : InstRW<[N2Write_2cyc_1L01_2V_1I], (instrs STPQpost)>;
+def : InstRW<[N2Write_2c_1L01_2V_1I], (instrs STPQpost)>;

// Store vector pair, immed pre-index, Q-form
-def : InstRW<[N2Write_2cyc_1L01_2V_2I], (instrs STPQpre)>;
+def : InstRW<[N2Write_2c_1L01_2V_2I], (instrs STPQpre)>;
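A note on the names being renamed throughout this file: the N2Write_<latency>c_<units> identifiers are per-core write resources, and only the latency suffix changes ("cyc" becomes "c"), so the scheduling numbers themselves are untouched. A minimal TableGen sketch of one such helper (the body below is an assumption based on the naming convention, not a line quoted from this patch):

def N2Write_10c_1V0 : SchedWriteRes<[N2UnitV0]> {
  let Latency     = 10; // "10c": result ready after 10 cycles
  let NumMicroOps = 1;  // "1V0": one micro-op issued to the V0 pipeline
}

Because uses only reference the def by name, the rename is purely mechanical once every SchedAlias and InstRW occurrence is updated, which is what the hunks in this file do.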
// ASIMD integer instructions
// -----------------------------------------------------------------------------
@@ -956,82 +956,82 @@ def : InstRW<[N2Write_2cyc_1L01_2V_2I], (instrs STPQpre)>;
// ASIMD absolute diff
// ASIMD absolute diff long
// ASIMD arith, basic
// ASIMD arith, complex
// ASIMD compare
// ASIMD logical
// ASIMD max/min, basic and pair-wise
-def : SchedAlias<WriteVd, N2Write_2cyc_1V>;
-def : SchedAlias<WriteVq, N2Write_2cyc_1V>;
+def : SchedAlias<WriteVd, N2Write_2c_1V>;
+def : SchedAlias<WriteVq, N2Write_2c_1V>;

// ASIMD absolute diff accum
// ASIMD absolute diff accum long
-def : InstRW<[N2Write_4cyc_1V1],
+def : InstRW<[N2Write_4c_1V1],
              (instregex "^SABAv", "^UABAv", "^SABALv", "^UABALv")>;

// ASIMD arith, reduce, 4H/4S
-def : InstRW<[N2Write_2cyc_1V1], (instregex "^(ADDV|[SU]ADDLV)v4(i16|i32)v$")>;
+def : InstRW<[N2Write_2c_1V1], (instregex "^(ADDV|[SU]ADDLV)v4(i16|i32)v$")>;

// ASIMD arith, reduce, 8B/8H
-def : InstRW<[N2Write_4cyc_1V1_1V],
+def : InstRW<[N2Write_4c_1V1_1V],
              (instregex "^(ADDV|[SU]ADDLV)v8(i8|i16)v$")>;

// ASIMD arith, reduce, 16B
-def : InstRW<[N2Write_4cyc_1V1], (instrs ADDVv16i8v, SADDLVv16i8v,
-                                         UADDLVv16i8v)>;
+def : InstRW<[N2Write_4c_1V1], (instrs ADDVv16i8v, SADDLVv16i8v,
+                                       UADDLVv16i8v)>;

// ASIMD dot product
// ASIMD dot product using signed and unsigned integers
-def : InstRW<[N2Write_3cyc_1V],
+def : InstRW<[N2Write_3c_1V],
              (instregex "^([SU]|SU|US)DOT(lane)?(v8|v16)i8$")>;

// ASIMD matrix multiply-accumulate
-def : InstRW<[N2Write_3cyc_1V], (instrs SMMLA, UMMLA, USMMLA)>;
+def : InstRW<[N2Write_3c_1V], (instrs SMMLA, UMMLA, USMMLA)>;

// ASIMD max/min, reduce, 4H/4S
-def : InstRW<[N2Write_2cyc_1V1], (instregex "^[SU](MAX|MIN)Vv4i16v$",
-                                            "^[SU](MAX|MIN)Vv4i32v$")>;
+def : InstRW<[N2Write_2c_1V1], (instregex "^[SU](MAX|MIN)Vv4i16v$",
+                                          "^[SU](MAX|MIN)Vv4i32v$")>;

// ASIMD max/min, reduce, 8B/8H
-def : InstRW<[N2Write_4cyc_1V1_1V], (instregex "^[SU](MAX|MIN)Vv8i8v$",
-                                               "^[SU](MAX|MIN)Vv8i16v$")>;
+def : InstRW<[N2Write_4c_1V1_1V], (instregex "^[SU](MAX|MIN)Vv8i8v$",
+                                             "^[SU](MAX|MIN)Vv8i16v$")>;

// ASIMD max/min, reduce, 16B
-def : InstRW<[N2Write_4cyc_2V1], (instregex "[SU](MAX|MIN)Vv16i8v$")>;
+def : InstRW<[N2Write_4c_2V1], (instregex "[SU](MAX|MIN)Vv16i8v$")>;

// ASIMD multiply
-def : InstRW<[N2Write_4cyc_1V0], (instregex "^MULv", "^SQ(R)?DMULHv")>;
+def : InstRW<[N2Write_4c_1V0], (instregex "^MULv", "^SQ(R)?DMULHv")>;

// ASIMD multiply accumulate
-def : InstRW<[N2Write_4cyc_1V0], (instregex "^MLAv", "^MLSv")>;
+def : InstRW<[N2Write_4c_1V0], (instregex "^MLAv", "^MLSv")>;

// ASIMD multiply accumulate high
-def : InstRW<[N2Write_4cyc_1V0], (instregex "^SQRDMLAHv", "^SQRDMLSHv")>;
+def : InstRW<[N2Write_4c_1V0], (instregex "^SQRDMLAHv", "^SQRDMLSHv")>;

// ASIMD multiply accumulate long
-def : InstRW<[N2Write_4cyc_1V0], (instregex "^[SU]MLALv", "^[SU]MLSLv")>;
+def : InstRW<[N2Write_4c_1V0], (instregex "^[SU]MLALv", "^[SU]MLSLv")>;

// ASIMD multiply accumulate saturating long
-def : InstRW<[N2Write_4cyc_1V0], (instregex "^SQDMLALv", "^SQDMLSLv")>;
+def : InstRW<[N2Write_4c_1V0], (instregex "^SQDMLALv", "^SQDMLSLv")>;

// ASIMD multiply/multiply long (8x8) polynomial, D-form
// ASIMD multiply/multiply long (8x8) polynomial, Q-form
-def : InstRW<[N2Write_3cyc_1V0], (instregex "^PMULL?(v8i8|v16i8)$")>;
+def : InstRW<[N2Write_3c_1V0], (instregex "^PMULL?(v8i8|v16i8)$")>;

// ASIMD multiply long
-def : InstRW<[N2Write_3cyc_1V], (instregex "^[SU]MULLv", "^SQDMULLv")>;
+def : InstRW<[N2Write_3c_1V], (instregex "^[SU]MULLv", "^SQDMULLv")>;

// ASIMD pairwise add and accumulate long
-def : InstRW<[N2Write_4cyc_1V1], (instregex "^[SU]ADALPv")>;
+def : InstRW<[N2Write_4c_1V1], (instregex "^[SU]ADALPv")>;

// ASIMD shift accumulate
-def : InstRW<[N2Write_4cyc_1V1], (instregex "^[SU]SRAv", "^[SU]RSRAv")>;
+def : InstRW<[N2Write_4c_1V1], (instregex "^[SU]SRAv", "^[SU]RSRAv")>;

// ASIMD shift by immed, basic
-def : InstRW<[N2Write_2cyc_1V1], (instregex "^SHLv", "^SHLLv", "^SHRNv",
-                                            "^SSHLLv", "^SSHRv", "^USHLLv",
-                                            "^USHRv")>;
+def : InstRW<[N2Write_2c_1V1], (instregex "^SHLv", "^SHLLv", "^SHRNv",
+                                          "^SSHLLv", "^SSHRv", "^USHLLv",
+                                          "^USHRv")>;

// ASIMD shift by immed and insert, basic
-def : InstRW<[N2Write_2cyc_1V1], (instregex "^SLIv", "^SRIv")>;
+def : InstRW<[N2Write_2c_1V1], (instregex "^SLIv", "^SRIv")>;

// ASIMD shift by immed, complex
-def : InstRW<[N2Write_4cyc_1V1],
+def : InstRW<[N2Write_4c_1V1],
   (instregex "^RSHRNv", "^SQRSHRNv", "^SQRSHRUNv",
              "^(SQSHLU?|UQSHL)[bhsd]$",
              "^(SQSHLU?|UQSHL)(v8i8|v16i8|v4i16|v8i16|v2i32|v4i32|v2i64)_shift$",
@@ -1039,10 +1039,10 @@ def : InstRW<[N2Write_4cyc_1V1],
              "^UQSHRNv", "^URSHRv")>;

// ASIMD shift by register, basic
-def : InstRW<[N2Write_2cyc_1V1], (instregex "^[SU]SHLv")>;
+def : InstRW<[N2Write_2c_1V1], (instregex "^[SU]SHLv")>;

// ASIMD shift by register, complex
-def : InstRW<[N2Write_4cyc_1V1],
+def : InstRW<[N2Write_4c_1V1],
   (instregex "^[SU]RSHLv", "^[SU]QRSHLv",
              "^[SU]QSHL(v1i8|v1i16|v1i32|v1i64|v8i8|v16i8|v4i16|v8i16|v2i32|v4i32|v2i64)$")>;
@@ -1059,110 +1059,110 @@ def : InstRW<[N2Write_4cyc_1V1],
// Handled by SchedAlias<WriteV[dq], ...>

// ASIMD FP complex multiply add
-def : InstRW<[N2Write_4cyc_1V], (instregex "^FCMLAv")>;
+def : InstRW<[N2Write_4c_1V], (instregex "^FCMLAv")>;

// ASIMD FP convert, long (F16 to F32)
-def : InstRW<[N2Write_4cyc_2V0], (instregex "^FCVTL(v4|v8)i16")>;
+def : InstRW<[N2Write_4c_2V0], (instregex "^FCVTL(v4|v8)i16")>;

// ASIMD FP convert, long (F32 to F64)
-def : InstRW<[N2Write_3cyc_1V0], (instregex "^FCVTL(v2|v4)i32")>;
+def : InstRW<[N2Write_3c_1V0], (instregex "^FCVTL(v2|v4)i32")>;

// ASIMD FP convert, narrow (F32 to F16)
-def : InstRW<[N2Write_4cyc_2V0], (instregex "^FCVTN(v4|v8)i16")>;
+def : InstRW<[N2Write_4c_2V0], (instregex "^FCVTN(v4|v8)i16")>;

// ASIMD FP convert, narrow (F64 to F32)
-def : InstRW<[N2Write_3cyc_1V0], (instregex "^FCVTN(v2|v4)i32",
-                                            "^FCVTXN(v2|v4)f32")>;
+def : InstRW<[N2Write_3c_1V0], (instregex "^FCVTN(v2|v4)i32",
+                                          "^FCVTXN(v2|v4)f32")>;

// ASIMD FP convert, other, D-form F32 and Q-form F64
-def : InstRW<[N2Write_3cyc_1V0], (instregex "^[FSU]CVT[AMNPZ][SU]v2f(32|64)$",
-                                            "^[SU]CVTFv2f(32|64)$")>;
+def : InstRW<[N2Write_3c_1V0], (instregex "^[FSU]CVT[AMNPZ][SU]v2f(32|64)$",
+                                          "^[SU]CVTFv2f(32|64)$")>;

// ASIMD FP convert, other, D-form F16 and Q-form F32
-def : InstRW<[N2Write_4cyc_2V0], (instregex "^[FSU]CVT[AMNPZ][SU]v4f(16|32)$",
-                                            "^[SU]CVTFv4f(16|32)$")>;
+def : InstRW<[N2Write_4c_2V0], (instregex "^[FSU]CVT[AMNPZ][SU]v4f(16|32)$",
+                                          "^[SU]CVTFv4f(16|32)$")>;

// ASIMD FP convert, other, Q-form F16
-def : InstRW<[N2Write_6cyc_4V0], (instregex "^[FSU]CVT[AMNPZ][SU]v8f16$",
-                                            "^[SU]CVTFv8f16$")>;
+def : InstRW<[N2Write_6c_4V0], (instregex "^[FSU]CVT[AMNPZ][SU]v8f16$",
+                                          "^[SU]CVTFv8f16$")>;

// ASIMD FP divide, D-form, F16
-def : InstRW<[N2Write_7cyc_1V0], (instrs FDIVv4f16)>;
+def : InstRW<[N2Write_7c_1V0], (instrs FDIVv4f16)>;

// ASIMD FP divide, D-form, F32
-def : InstRW<[N2Write_10cyc_2V0], (instrs FDIVv2f32)>;
+def : InstRW<[N2Write_10c_2V0], (instrs FDIVv2f32)>;

// ASIMD FP divide, Q-form, F16
-def : InstRW<[N2Write_13cyc_2V0], (instrs FDIVv8f16)>;
+def : InstRW<[N2Write_13c_2V0], (instrs FDIVv8f16)>;

// ASIMD FP divide, Q-form, F32
-def : InstRW<[N2Write_10cyc_2V0], (instrs FDIVv4f32)>;
+def : InstRW<[N2Write_10c_2V0], (instrs FDIVv4f32)>;

// ASIMD FP divide, Q-form, F64
-def : InstRW<[N2Write_15cyc_2V0], (instrs FDIVv2f64)>;
+def : InstRW<[N2Write_15c_2V0], (instrs FDIVv2f64)>;

// ASIMD FP max/min, reduce, F32 and D-form F16
-def : InstRW<[N2Write_4cyc_1V], (instregex "^(FMAX|FMIN)(NM)?Vv4(i16|i32)v$")>;
+def : InstRW<[N2Write_4c_1V], (instregex "^(FMAX|FMIN)(NM)?Vv4(i16|i32)v$")>;

// ASIMD FP max/min, reduce, Q-form F16
-def : InstRW<[N2Write_6cyc_2V], (instregex "^(FMAX|FMIN)(NM)?Vv8i16v$")>;
+def : InstRW<[N2Write_6c_2V], (instregex "^(FMAX|FMIN)(NM)?Vv8i16v$")>;

// ASIMD FP multiply
-def : InstRW<[N2Write_3cyc_1V], (instregex "^FMULv", "^FMULXv")>;
+def : InstRW<[N2Write_3c_1V], (instregex "^FMULv", "^FMULXv")>;

// ASIMD FP multiply accumulate
-def : InstRW<[N2Write_4cyc_1V], (instregex "^FMLAv", "^FMLSv")>;
+def : InstRW<[N2Write_4c_1V], (instregex "^FMLAv", "^FMLSv")>;

// ASIMD FP multiply accumulate long
-def : InstRW<[N2Write_5cyc_1V], (instregex "^FMLALv", "^FMLSLv")>;
+def : InstRW<[N2Write_5c_1V], (instregex "^FMLALv", "^FMLSLv")>;

// ASIMD FP round, D-form F32 and Q-form F64
-def : InstRW<[N2Write_3cyc_1V0],
+def : InstRW<[N2Write_3c_1V0],
   (instregex "^FRINT[AIMNPXZ]v2f(32|64)$",
              "^FRINT(32|64)[XZ]v2f(32|64)$")>;

// ASIMD FP round, D-form F16 and Q-form F32
-def : InstRW<[N2Write_4cyc_2V0],
+def : InstRW<[N2Write_4c_2V0],
   (instregex "^FRINT[AIMNPXZ]v4f(16|32)$",
              "^FRINT(32|64)[XZ]v4f32$")>;

// ASIMD FP round, Q-form F16
-def : InstRW<[N2Write_6cyc_4V0], (instregex "^FRINT[AIMNPXZ]v8f16$")>;
+def : InstRW<[N2Write_6c_4V0], (instregex "^FRINT[AIMNPXZ]v8f16$")>;

// ASIMD FP square root, D-form, F16
-def : InstRW<[N2Write_7cyc_1V0], (instrs FSQRTv4f16)>;
+def : InstRW<[N2Write_7c_1V0], (instrs FSQRTv4f16)>;

// ASIMD FP square root, D-form, F32
-def : InstRW<[N2Write_10cyc_2V0], (instrs FSQRTv2f32)>;
+def : InstRW<[N2Write_10c_2V0], (instrs FSQRTv2f32)>;

// ASIMD FP square root, Q-form, F16
-def : InstRW<[N2Write_13cyc_2V0], (instrs FSQRTv8f16)>;
+def : InstRW<[N2Write_13c_2V0], (instrs FSQRTv8f16)>;

// ASIMD FP square root, Q-form, F32
-def : InstRW<[N2Write_10cyc_2V0], (instrs FSQRTv4f32)>;
+def : InstRW<[N2Write_10c_2V0], (instrs FSQRTv4f32)>;

// ASIMD FP square root, Q-form, F64
-def : InstRW<[N2Write_16cyc_2V0], (instrs FSQRTv2f64)>;
+def : InstRW<[N2Write_16c_2V0], (instrs FSQRTv2f64)>;

// ASIMD BFloat16 (BF16) instructions
// -----------------------------------------------------------------------------

// ASIMD convert, F32 to BF16
-def : InstRW<[N2Write_4cyc_1V0], (instrs BFCVTN, BFCVTN2)>;
+def : InstRW<[N2Write_4c_1V0], (instrs BFCVTN, BFCVTN2)>;

// ASIMD dot product
-def : InstRW<[N2Write_4cyc_1V], (instrs BFDOTv4bf16, BFDOTv8bf16)>;
+def : InstRW<[N2Write_4c_1V], (instrs BFDOTv4bf16, BFDOTv8bf16)>;

// ASIMD matrix multiply accumulate
-def : InstRW<[N2Write_5cyc_1V], (instrs BFMMLA)>;
+def : InstRW<[N2Write_5c_1V], (instrs BFMMLA)>;

// ASIMD multiply accumulate long
-def : InstRW<[N2Write_4cyc_1V], (instrs BFMLALB, BFMLALBIdx, BFMLALT,
-                                        BFMLALTIdx)>;
+def : InstRW<[N2Write_4c_1V], (instrs BFMLALB, BFMLALBIdx, BFMLALT,
+                                      BFMLALTIdx)>;

// Scalar convert, F32 to BF16
-def : InstRW<[N2Write_3cyc_1V0], (instrs BFCVT)>;
+def : InstRW<[N2Write_3c_1V0], (instrs BFCVT)>;

// ASIMD miscellaneous instructions
// -----------------------------------------------------------------------------
@@ -1185,412 +1185,412 @@ def : InstRW<[N2Write_3cyc_1V0], (instrs BFCVT)>;
// Handled by SchedAlias<WriteV[dq], ...>

// ASIMD duplicate, gen reg
-def : InstRW<[N2Write_3cyc_1M0], (instregex "^DUPv.+gpr")>;
+def : InstRW<[N2Write_3c_1M0], (instregex "^DUPv.+gpr")>;

// ASIMD extract narrow, saturating
-def : InstRW<[N2Write_4cyc_1V1], (instregex "^[SU]QXTNv", "^SQXTUNv")>;
+def : InstRW<[N2Write_4c_1V1], (instregex "^[SU]QXTNv", "^SQXTUNv")>;

// ASIMD reciprocal and square root estimate, D-form U32
-def : InstRW<[N2Write_3cyc_1V0], (instrs URECPEv2i32, URSQRTEv2i32)>;
+def : InstRW<[N2Write_3c_1V0], (instrs URECPEv2i32, URSQRTEv2i32)>;

// ASIMD reciprocal and square root estimate, Q-form U32
-def : InstRW<[N2Write_4cyc_2V0], (instrs URECPEv4i32, URSQRTEv4i32)>;
+def : InstRW<[N2Write_4c_2V0], (instrs URECPEv4i32, URSQRTEv4i32)>;

// ASIMD reciprocal and square root estimate, D-form F32 and scalar forms
-def : InstRW<[N2Write_3cyc_1V0], (instrs FRECPEv1f16, FRECPEv1i32,
-                                         FRECPEv1i64, FRECPEv2f32,
-                                         FRSQRTEv1f16, FRSQRTEv1i32,
-                                         FRSQRTEv1i64, FRSQRTEv2f32)>;
+def : InstRW<[N2Write_3c_1V0], (instrs FRECPEv1f16, FRECPEv1i32,
+                                       FRECPEv1i64, FRECPEv2f32,
+                                       FRSQRTEv1f16, FRSQRTEv1i32,
+                                       FRSQRTEv1i64, FRSQRTEv2f32)>;

// ASIMD reciprocal and square root estimate, D-form F16 and Q-form F32
-def : InstRW<[N2Write_4cyc_2V0], (instrs FRECPEv4f16, FRECPEv4f32,
+def : InstRW<[N2Write_4c_2V0], (instrs FRECPEv4f16, FRECPEv4f32,
                                        FRSQRTEv4f16, FRSQRTEv4f32)>;

// ASIMD reciprocal and square root estimate, Q-form F16
-def : InstRW<[N2Write_6cyc_4V0], (instrs FRECPEv8f16, FRSQRTEv8f16)>;
+def : InstRW<[N2Write_6c_4V0], (instrs FRECPEv8f16, FRSQRTEv8f16)>;

// ASIMD reciprocal exponent
-def : InstRW<[N2Write_3cyc_1V0], (instregex "^FRECPXv")>;
+def : InstRW<[N2Write_3c_1V0], (instregex "^FRECPXv")>;

// ASIMD reciprocal step
-def : InstRW<[N2Write_4cyc_1V], (instregex "^FRECPSv", "^FRSQRTSv")>;
+def : InstRW<[N2Write_4c_1V], (instregex "^FRECPSv", "^FRSQRTSv")>;

// ASIMD table lookup, 3 table regs
-def : InstRW<[N2Write_4cyc_2V], (instrs TBLv8i8Three, TBLv16i8Three)>;
+def : InstRW<[N2Write_4c_2V], (instrs TBLv8i8Three, TBLv16i8Three)>;

// ASIMD table lookup, 4 table regs
-def : InstRW<[N2Write_4cyc_4V], (instrs TBLv8i8Four, TBLv16i8Four)>;
+def : InstRW<[N2Write_4c_4V], (instrs TBLv8i8Four, TBLv16i8Four)>;

// ASIMD table lookup extension, 2 table reg
-def : InstRW<[N2Write_4cyc_2V], (instrs TBXv8i8Two, TBXv16i8Two)>;
+def : InstRW<[N2Write_4c_2V], (instrs TBXv8i8Two, TBXv16i8Two)>;

// ASIMD table lookup extension, 3 table reg
-def : InstRW<[N2Write_6cyc_4V], (instrs TBXv8i8Three, TBXv16i8Three)>;
+def : InstRW<[N2Write_6c_4V], (instrs TBXv8i8Three, TBXv16i8Three)>;

// ASIMD table lookup extension, 4 table reg
-def : InstRW<[N2Write_6cyc_8V], (instrs TBXv8i8Four, TBXv16i8Four)>;
+def : InstRW<[N2Write_6c_8V], (instrs TBXv8i8Four, TBXv16i8Four)>;

// ASIMD transfer, gen reg to element
-def : InstRW<[N2Write_5cyc_1M0_1V], (instregex "^INSvi(8|16|32|64)gpr$")>;
+def : InstRW<[N2Write_5c_1M0_1V], (instregex "^INSvi(8|16|32|64)gpr$")>;

// ASIMD load instructions
// -----------------------------------------------------------------------------

// ASIMD load, 1 element, multiple, 1 reg, D-form
-def : InstRW<[N2Write_6cyc_1L], (instregex "^LD1Onev(8b|4h|2s|1d)$")>;
-def : InstRW<[WriteAdr, N2Write_6cyc_1L],
+def : InstRW<[N2Write_6c_1L], (instregex "^LD1Onev(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteAdr, N2Write_6c_1L],
              (instregex "^LD1Onev(8b|4h|2s|1d)_POST$")>;

// ASIMD load, 1 element, multiple, 1 reg, Q-form
-def : InstRW<[N2Write_6cyc_1L], (instregex "^LD1Onev(16b|8h|4s|2d)$")>;
-def : InstRW<[WriteAdr, N2Write_6cyc_1L],
+def : InstRW<[N2Write_6c_1L], (instregex "^LD1Onev(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, N2Write_6c_1L],
              (instregex "^LD1Onev(16b|8h|4s|2d)_POST$")>;

// ASIMD load, 1 element, multiple, 2 reg, D-form
-def : InstRW<[N2Write_6cyc_2L], (instregex "^LD1Twov(8b|4h|2s|1d)$")>;
-def : InstRW<[WriteAdr, N2Write_6cyc_2L],
+def : InstRW<[N2Write_6c_2L], (instregex "^LD1Twov(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteAdr, N2Write_6c_2L],
              (instregex "^LD1Twov(8b|4h|2s|1d)_POST$")>;

// ASIMD load, 1 element, multiple, 2 reg, Q-form
-def : InstRW<[N2Write_6cyc_2L], (instregex "^LD1Twov(16b|8h|4s|2d)$")>;
-def : InstRW<[WriteAdr, N2Write_6cyc_2L],
+def : InstRW<[N2Write_6c_2L], (instregex "^LD1Twov(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, N2Write_6c_2L],
              (instregex "^LD1Twov(16b|8h|4s|2d)_POST$")>;

// ASIMD load, 1 element, multiple, 3 reg, D-form
-def : InstRW<[N2Write_6cyc_3L], (instregex "^LD1Threev(8b|4h|2s|1d)$")>;
-def : InstRW<[WriteAdr, N2Write_6cyc_3L],
+def : InstRW<[N2Write_6c_3L], (instregex "^LD1Threev(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteAdr, N2Write_6c_3L],
              (instregex "^LD1Threev(8b|4h|2s|1d)_POST$")>;

// ASIMD load, 1 element, multiple, 3 reg, Q-form
-def : InstRW<[N2Write_6cyc_3L], (instregex "^LD1Threev(16b|8h|4s|2d)$")>;
-def : InstRW<[WriteAdr, N2Write_6cyc_3L],
+def : InstRW<[N2Write_6c_3L], (instregex "^LD1Threev(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, N2Write_6c_3L],
              (instregex "^LD1Threev(16b|8h|4s|2d)_POST$")>;

// ASIMD load, 1 element, multiple, 4 reg, D-form
-def : InstRW<[N2Write_7cyc_4L], (instregex "^LD1Fourv(8b|4h|2s|1d)$")>;
-def : InstRW<[WriteAdr, N2Write_7cyc_4L],
+def : InstRW<[N2Write_7c_4L], (instregex "^LD1Fourv(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteAdr, N2Write_7c_4L],
              (instregex "^LD1Fourv(8b|4h|2s|1d)_POST$")>;

// ASIMD load, 1 element, multiple, 4 reg, Q-form
-def : InstRW<[N2Write_7cyc_4L], (instregex "^LD1Fourv(16b|8h|4s|2d)$")>;
-def : InstRW<[WriteAdr, N2Write_7cyc_4L],
+def : InstRW<[N2Write_7c_4L], (instregex "^LD1Fourv(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, N2Write_7c_4L],
              (instregex "^LD1Fourv(16b|8h|4s|2d)_POST$")>;

// ASIMD load, 1 element, one lane, B/H/S
// ASIMD load, 1 element, one lane, D
-def : InstRW<[N2Write_8cyc_1L_1V], (instregex "LD1i(8|16|32|64)$")>;
-def : InstRW<[WriteAdr, N2Write_8cyc_1L_1V], (instregex "LD1i(8|16|32|64)_POST$")>;
+def : InstRW<[N2Write_8c_1L_1V], (instregex "LD1i(8|16|32|64)$")>;
+def : InstRW<[WriteAdr, N2Write_8c_1L_1V], (instregex "LD1i(8|16|32|64)_POST$")>;

// ASIMD load, 1 element, all lanes, D-form, B/H/S
// ASIMD load, 1 element, all lanes, D-form, D
-def : InstRW<[N2Write_8cyc_1L_1V], (instregex "LD1Rv(8b|4h|2s|1d)$")>;
-def : InstRW<[WriteAdr, N2Write_8cyc_1L_1V], (instregex "LD1Rv(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[N2Write_8c_1L_1V], (instregex "LD1Rv(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteAdr, N2Write_8c_1L_1V], (instregex "LD1Rv(8b|4h|2s|1d)_POST$")>;

// ASIMD load, 1 element, all lanes, Q-form
-def : InstRW<[N2Write_8cyc_1L_1V], (instregex "LD1Rv(16b|8h|4s|2d)$")>;
-def : InstRW<[WriteAdr, N2Write_8cyc_1L_1V], (instregex "LD1Rv(16b|8h|4s|2d)_POST$")>;
+def : InstRW<[N2Write_8c_1L_1V], (instregex "LD1Rv(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, N2Write_8c_1L_1V], (instregex "LD1Rv(16b|8h|4s|2d)_POST$")>;

// ASIMD load, 2 element, multiple, D-form, B/H/S
-def : InstRW<[N2Write_8cyc_1L_2V], (instregex "LD2Twov(8b|4h|2s)$")>;
-def : InstRW<[WriteAdr, N2Write_8cyc_1L_2V], (instregex "LD2Twov(8b|4h|2s)_POST$")>;
+def : InstRW<[N2Write_8c_1L_2V], (instregex "LD2Twov(8b|4h|2s)$")>;
+def : InstRW<[WriteAdr, N2Write_8c_1L_2V], (instregex "LD2Twov(8b|4h|2s)_POST$")>;

// ASIMD load, 2 element, multiple, Q-form, B/H/S
// ASIMD load, 2 element, multiple, Q-form, D
-def : InstRW<[N2Write_8cyc_2L_2V], (instregex "LD2Twov(16b|8h|4s|2d)$")>;
-def : InstRW<[WriteAdr, N2Write_8cyc_2L_2V], (instregex "LD2Twov(16b|8h|4s|2d)_POST$")>;
+def : InstRW<[N2Write_8c_2L_2V], (instregex "LD2Twov(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, N2Write_8c_2L_2V], (instregex "LD2Twov(16b|8h|4s|2d)_POST$")>;

// ASIMD load, 2 element, one lane, B/H
// ASIMD load, 2 element, one lane, S
// ASIMD load, 2 element, one lane, D
-def : InstRW<[N2Write_8cyc_1L_2V], (instregex "LD2i(8|16|32|64)$")>;
-def : InstRW<[WriteAdr, N2Write_8cyc_1L_2V], (instregex "LD2i(8|16|32|64)_POST$")>;
+def : InstRW<[N2Write_8c_1L_2V], (instregex "LD2i(8|16|32|64)$")>;
+def : InstRW<[WriteAdr, N2Write_8c_1L_2V], (instregex "LD2i(8|16|32|64)_POST$")>;

// ASIMD load, 2 element, all lanes, D-form, B/H/S
// ASIMD load, 2 element, all lanes, D-form, D
-def : InstRW<[N2Write_8cyc_1L_2V], (instregex "LD2Rv(8b|4h|2s|1d)$")>;
-def : InstRW<[WriteAdr, N2Write_8cyc_1L_2V], (instregex "LD2Rv(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[N2Write_8c_1L_2V], (instregex "LD2Rv(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteAdr, N2Write_8c_1L_2V], (instregex "LD2Rv(8b|4h|2s|1d)_POST$")>;

// ASIMD load, 2 element, all lanes, Q-form
-def : InstRW<[N2Write_8cyc_1L_2V], (instregex "LD2Rv(16b|8h|4s|2d)$")>;
-def : InstRW<[WriteAdr, N2Write_8cyc_1L_2V], (instregex "LD2Rv(16b|8h|4s|2d)_POST$")>;
+def : InstRW<[N2Write_8c_1L_2V], (instregex "LD2Rv(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, N2Write_8c_1L_2V], (instregex "LD2Rv(16b|8h|4s|2d)_POST$")>;

// ASIMD load, 3 element, multiple, D-form, B/H/S
-def : InstRW<[N2Write_8cyc_2L_3V], (instregex "LD3Threev(8b|4h|2s)$")>;
-def : InstRW<[WriteAdr, N2Write_8cyc_2L_3V], (instregex "LD3Threev(8b|4h|2s)_POST$")>;
+def : InstRW<[N2Write_8c_2L_3V], (instregex "LD3Threev(8b|4h|2s)$")>;
+def : InstRW<[WriteAdr, N2Write_8c_2L_3V], (instregex "LD3Threev(8b|4h|2s)_POST$")>;

// ASIMD load, 3 element, multiple, Q-form, B/H/S
-def : InstRW<[N2Write_8cyc_3L_3V], (instregex "LD3Threev(16b|8h|4s)$")>;
-def : InstRW<[WriteAdr, N2Write_8cyc_3L_3V], (instregex "LD3Threev(16b|8h|4s)_POST$")>;
+def : InstRW<[N2Write_8c_3L_3V], (instregex "LD3Threev(16b|8h|4s)$")>;
+def : InstRW<[WriteAdr, N2Write_8c_3L_3V], (instregex "LD3Threev(16b|8h|4s)_POST$")>;

// ASIMD load, 3 element, multiple, Q-form, D
-def : InstRW<[N2Write_8cyc_3L_3V], (instregex "LD3Threev(2d)$")>;
-def : InstRW<[WriteAdr, N2Write_8cyc_3L_3V], (instregex "LD3Threev(2d)_POST$")>;
+def : InstRW<[N2Write_8c_3L_3V], (instregex "LD3Threev(2d)$")>;
+def : InstRW<[WriteAdr, N2Write_8c_3L_3V], (instregex "LD3Threev(2d)_POST$")>;

// ASIMD load, 3 element, one lane, B/H
// ASIMD load, 3 element, one lane, S
// ASIMD load, 3 element, one lane, D
-def : InstRW<[N2Write_8cyc_2L_3V], (instregex "LD3i(8|16|32|64)$")>;
-def : InstRW<[WriteAdr, N2Write_8cyc_2L_3V], (instregex "LD3i(8|16|32|64)_POST$")>;
+def : InstRW<[N2Write_8c_2L_3V], (instregex "LD3i(8|16|32|64)$")>;
+def : InstRW<[WriteAdr, N2Write_8c_2L_3V], (instregex "LD3i(8|16|32|64)_POST$")>;

// ASIMD load, 3 element, all lanes, D-form, B/H/S
// ASIMD load, 3 element, all lanes, D-form, D
-def : InstRW<[N2Write_8cyc_2L_3V], (instregex "LD3Rv(8b|4h|2s|1d)$")>;
-def : InstRW<[WriteAdr, N2Write_8cyc_2L_3V], (instregex "LD3Rv(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[N2Write_8c_2L_3V], (instregex "LD3Rv(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteAdr, N2Write_8c_2L_3V], (instregex "LD3Rv(8b|4h|2s|1d)_POST$")>;

// ASIMD load, 3 element, all lanes, Q-form, B/H/S
// ASIMD load, 3 element, all lanes, Q-form, D
-def : InstRW<[N2Write_8cyc_3L_3V], (instregex "LD3Rv(16b|8h|4s|2d)$")>;
-def : InstRW<[WriteAdr, N2Write_8cyc_3L_3V], (instregex "LD3Rv(16b|8h|4s|2d)_POST$")>;
+def : InstRW<[N2Write_8c_3L_3V], (instregex "LD3Rv(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, N2Write_8c_3L_3V], (instregex "LD3Rv(16b|8h|4s|2d)_POST$")>;

// ASIMD load, 4 element, multiple, D-form, B/H/S
-def : InstRW<[N2Write_8cyc_3L_4V], (instregex "LD4Fourv(8b|4h|2s)$")>;
-def : InstRW<[WriteAdr, N2Write_8cyc_3L_4V], (instregex "LD4Fourv(8b|4h|2s)_POST$")>;
+def : InstRW<[N2Write_8c_3L_4V], (instregex "LD4Fourv(8b|4h|2s)$")>;
+def : InstRW<[WriteAdr, N2Write_8c_3L_4V], (instregex "LD4Fourv(8b|4h|2s)_POST$")>;

// ASIMD load, 4 element, multiple, Q-form, B/H/S
// ASIMD load, 4 element, multiple, Q-form, D
-def : InstRW<[N2Write_9cyc_4L_4V], (instregex "LD4Fourv(16b|8h|4s|2d)$")>;
-def : InstRW<[WriteAdr, N2Write_9cyc_4L_4V], (instregex "LD4Fourv(16b|8h|4s|2d)_POST$")>;
+def : InstRW<[N2Write_9c_4L_4V], (instregex "LD4Fourv(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, N2Write_9c_4L_4V], (instregex "LD4Fourv(16b|8h|4s|2d)_POST$")>;

// ASIMD load, 4 element, one lane, B/H
// ASIMD load, 4 element, one lane, S
// ASIMD load, 4 element, one lane, D
-def : InstRW<[N2Write_8cyc_3L_4V], (instregex "LD4i(8|16|32|64)$")>;
-def : InstRW<[WriteAdr, N2Write_8cyc_3L_4V], (instregex "LD4i(8|16|32|64)_POST$")>;
+def : InstRW<[N2Write_8c_3L_4V], (instregex "LD4i(8|16|32|64)$")>;
+def : InstRW<[WriteAdr, N2Write_8c_3L_4V], (instregex "LD4i(8|16|32|64)_POST$")>;

// ASIMD load, 4 element, all lanes, D-form, B/H/S
// ASIMD load, 4 element, all lanes, D-form, D
-def : InstRW<[N2Write_8cyc_3L_4V], (instregex "LD4Rv(8b|4h|2s|1d)$")>;
-def : InstRW<[WriteAdr, N2Write_8cyc_3L_4V], (instregex "LD4Rv(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[N2Write_8c_3L_4V], (instregex "LD4Rv(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteAdr, N2Write_8c_3L_4V], (instregex "LD4Rv(8b|4h|2s|1d)_POST$")>;

// ASIMD load, 4 element, all lanes, Q-form, B/H/S
// ASIMD load, 4 element, all lanes, Q-form, D
-def : InstRW<[N2Write_8cyc_4L_4V], (instregex "LD4Rv(16b|8h|4s|2d)$")>;
-def : InstRW<[WriteAdr, N2Write_8cyc_4L_4V], (instregex "LD4Rv(16b|8h|4s|2d)_POST$")>;
+def : InstRW<[N2Write_8c_4L_4V], (instregex "LD4Rv(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, N2Write_8c_4L_4V], (instregex "LD4Rv(16b|8h|4s|2d)_POST$")>;

// ASIMD store instructions
// -----------------------------------------------------------------------------

// ASIMD store, 1 element, multiple, 1 reg, D-form
-def : InstRW<[N2Write_2cyc_1L01_1V], (instregex "ST1Onev(8b|4h|2s|1d)$")>;
-def : InstRW<[WriteAdr, N2Write_2cyc_1L01_1V], (instregex "ST1Onev(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[N2Write_2c_1L01_1V], (instregex "ST1Onev(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteAdr, N2Write_2c_1L01_1V], (instregex "ST1Onev(8b|4h|2s|1d)_POST$")>;

// ASIMD store, 1 element, multiple, 1 reg, Q-form
-def : InstRW<[N2Write_2cyc_1L01_1V], (instregex "ST1Onev(16b|8h|4s|2d)$")>;
-def : InstRW<[WriteAdr, N2Write_2cyc_1L01_1V], (instregex "ST1Onev(16b|8h|4s|2d)_POST$")>;
+def : InstRW<[N2Write_2c_1L01_1V], (instregex "ST1Onev(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, N2Write_2c_1L01_1V], (instregex "ST1Onev(16b|8h|4s|2d)_POST$")>;

// ASIMD store, 1 element, multiple, 2 reg, D-form
-def : InstRW<[N2Write_2cyc_1L01_1V], (instregex "ST1Twov(8b|4h|2s|1d)$")>;
-def : InstRW<[WriteAdr, N2Write_2cyc_1L01_1V], (instregex "ST1Twov(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[N2Write_2c_1L01_1V], (instregex "ST1Twov(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteAdr, N2Write_2c_1L01_1V], (instregex "ST1Twov(8b|4h|2s|1d)_POST$")>;

// ASIMD store, 1 element, multiple, 2 reg, Q-form
-def : InstRW<[N2Write_2cyc_2L01_2V], (instregex "ST1Twov(16b|8h|4s|2d)$")>;
-def : InstRW<[WriteAdr, N2Write_2cyc_2L01_2V], (instregex "ST1Twov(16b|8h|4s|2d)_POST$")>;
+def : InstRW<[N2Write_2c_2L01_2V], (instregex "ST1Twov(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, N2Write_2c_2L01_2V], (instregex "ST1Twov(16b|8h|4s|2d)_POST$")>;

// ASIMD store, 1 element, multiple, 3 reg, D-form
-def : InstRW<[N2Write_2cyc_2L01_2V], (instregex "ST1Threev(8b|4h|2s|1d)$")>;
-def : InstRW<[WriteAdr, N2Write_2cyc_2L01_2V], (instregex "ST1Threev(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[N2Write_2c_2L01_2V], (instregex "ST1Threev(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteAdr, N2Write_2c_2L01_2V], (instregex "ST1Threev(8b|4h|2s|1d)_POST$")>;

// ASIMD store, 1 element, multiple, 3 reg, Q-form
-def : InstRW<[N2Write_2cyc_3L01_3V], (instregex "ST1Threev(16b|8h|4s|2d)$")>;
-def : InstRW<[WriteAdr, N2Write_2cyc_3L01_3V], (instregex "ST1Threev(16b|8h|4s|2d)_POST$")>;
+def : InstRW<[N2Write_2c_3L01_3V], (instregex "ST1Threev(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, N2Write_2c_3L01_3V], (instregex "ST1Threev(16b|8h|4s|2d)_POST$")>;

// ASIMD store, 1 element, multiple, 4 reg, D-form
-def : InstRW<[N2Write_2cyc_2L01_2V], (instregex "ST1Fourv(8b|4h|2s|1d)$")>;
-def : InstRW<[WriteAdr, N2Write_2cyc_2L01_2V], (instregex "ST1Fourv(8b|4h|2s|1d)_POST$")>;
+def : InstRW<[N2Write_2c_2L01_2V], (instregex "ST1Fourv(8b|4h|2s|1d)$")>;
+def : InstRW<[WriteAdr, N2Write_2c_2L01_2V], (instregex "ST1Fourv(8b|4h|2s|1d)_POST$")>;

// ASIMD store, 1 element, multiple, 4 reg, Q-form
-def : InstRW<[N2Write_2cyc_4L01_4V], (instregex "ST1Fourv(16b|8h|4s|2d)$")>;
-def : InstRW<[WriteAdr, N2Write_2cyc_4L01_4V], (instregex "ST1Fourv(16b|8h|4s|2d)_POST$")>;
+def : InstRW<[N2Write_2c_4L01_4V], (instregex "ST1Fourv(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, N2Write_2c_4L01_4V], (instregex "ST1Fourv(16b|8h|4s|2d)_POST$")>;

// ASIMD store, 1 element, one lane, B/H/S
// ASIMD store, 1 element, one lane, D
-def : InstRW<[N2Write_4cyc_1L01_1V], (instregex "ST1i(8|16|32|64)$")>;
-def : InstRW<[WriteAdr, N2Write_4cyc_1L01_1V], (instregex "ST1i(8|16|32|64)_POST$")>;
+def : InstRW<[N2Write_4c_1L01_1V], (instregex "ST1i(8|16|32|64)$")>;
+def : InstRW<[WriteAdr, N2Write_4c_1L01_1V], (instregex "ST1i(8|16|32|64)_POST$")>;

// ASIMD store, 2 element, multiple, D-form, B/H/S
-def : InstRW<[N2Write_4cyc_1L01_1V], (instregex "ST2Twov(8b|4h|2s)$")>;
-def : InstRW<[WriteAdr, N2Write_4cyc_1L01_1V], (instregex "ST2Twov(8b|4h|2s)_POST$")>;
+def : InstRW<[N2Write_4c_1L01_1V], (instregex "ST2Twov(8b|4h|2s)$")>;
+def : InstRW<[WriteAdr, N2Write_4c_1L01_1V], (instregex "ST2Twov(8b|4h|2s)_POST$")>;

// ASIMD store, 2 element, multiple, Q-form, B/H/S
// ASIMD store, 2 element, multiple, Q-form, D
-def : InstRW<[N2Write_4cyc_2L01_2V], (instregex "ST2Twov(16b|8h|4s|2d)$")>;
-def : InstRW<[WriteAdr, N2Write_4cyc_2L01_2V], (instregex "ST2Twov(16b|8h|4s|2d)_POST$")>;
+def : InstRW<[N2Write_4c_2L01_2V], (instregex "ST2Twov(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, N2Write_4c_2L01_2V], (instregex "ST2Twov(16b|8h|4s|2d)_POST$")>;

// ASIMD store, 2 element, one lane, B/H/S
// ASIMD store, 2 element, one lane, D
-def : InstRW<[N2Write_4cyc_1L01_1V], (instregex "ST2i(8|16|32|64)$")>;
-def : InstRW<[WriteAdr, N2Write_4cyc_1L01_1V], (instregex "ST2i(8|16|32|64)_POST$")>;
+def : InstRW<[N2Write_4c_1L01_1V], (instregex "ST2i(8|16|32|64)$")>;
+def : InstRW<[WriteAdr, N2Write_4c_1L01_1V], (instregex "ST2i(8|16|32|64)_POST$")>;

// ASIMD store, 3 element, multiple, D-form, B/H/S
-def : InstRW<[N2Write_5cyc_2L01_2V], (instregex "ST3Threev(8b|4h|2s)$")>;
-def : InstRW<[WriteAdr, N2Write_5cyc_2L01_2V], (instregex "ST3Threev(8b|4h|2s)_POST$")>;
+def : InstRW<[N2Write_5c_2L01_2V], (instregex "ST3Threev(8b|4h|2s)$")>;
+def : InstRW<[WriteAdr, N2Write_5c_2L01_2V], (instregex "ST3Threev(8b|4h|2s)_POST$")>;

// ASIMD store, 3 element, multiple, Q-form, B/H/S
// ASIMD store, 3 element, multiple, Q-form, D
-def : InstRW<[N2Write_6cyc_3L01_3V], (instregex "ST3Threev(16b|8h|4s|2d)$")>;
-def : InstRW<[WriteAdr, N2Write_6cyc_3L01_3V], (instregex "ST3Threev(16b|8h|4s|2d)_POST$")>;
+def : InstRW<[N2Write_6c_3L01_3V], (instregex "ST3Threev(16b|8h|4s|2d)$")>;
+def : InstRW<[WriteAdr, N2Write_6c_3L01_3V], (instregex "ST3Threev(16b|8h|4s|2d)_POST$")>;

// ASIMD store, 3 element, one lane, B/H
// ASIMD store, 3 element, one lane, S
// ASIMD store, 3 element, one lane, D
-def : InstRW<[N2Write_6cyc_3L01_3V], (instregex "ST3i(8|16|32|64)$")>;
-def : InstRW<[WriteAdr, N2Write_6cyc_3L01_3V], (instregex "ST3i(8|16|32|64)_POST$")>;
+def : InstRW<[N2Write_6c_3L01_3V], (instregex "ST3i(8|16|32|64)$")>;
+def : InstRW<[WriteAdr, N2Write_6c_3L01_3V], (instregex "ST3i(8|16|32|64)_POST$")>;

// ASIMD store, 4 element, multiple, D-form, B/H/S
-def : InstRW<[N2Write_6cyc_3L01_3V], (instregex "ST4Fourv(8b|4h|2s)$")>;
-def : InstRW<[WriteAdr, N2Write_6cyc_3L01_3V], (instregex "ST4Fourv(8b|4h|2s)_POST$")>;
+def : InstRW<[N2Write_6c_3L01_3V], (instregex "ST4Fourv(8b|4h|2s)$")>;
+def : InstRW<[WriteAdr, N2Write_6c_3L01_3V], (instregex "ST4Fourv(8b|4h|2s)_POST$")>;

// ASIMD store, 4 element, multiple, Q-form, B/H/S
-def : InstRW<[N2Write_7cyc_6L01_6V], (instregex "ST4Fourv(16b|8h|4s)$")>;
-def : InstRW<[WriteAdr, N2Write_7cyc_6L01_6V], (instregex "ST4Fourv(16b|8h|4s)_POST$")>;
+def : InstRW<[N2Write_7c_6L01_6V], (instregex "ST4Fourv(16b|8h|4s)$")>;
+def : InstRW<[WriteAdr, N2Write_7c_6L01_6V], (instregex "ST4Fourv(16b|8h|4s)_POST$")>;

// ASIMD store, 4 element, multiple, Q-form, D
-def : InstRW<[N2Write_5cyc_4L01_4V], (instregex "ST4Fourv(2d)$")>;
-def : InstRW<[WriteAdr, N2Write_5cyc_4L01_4V], (instregex "ST4Fourv(2d)_POST$")>;
+def : InstRW<[N2Write_5c_4L01_4V], (instregex "ST4Fourv(2d)$")>;
+def : InstRW<[WriteAdr, N2Write_5c_4L01_4V], (instregex "ST4Fourv(2d)_POST$")>;

// ASIMD store, 4 element, one lane, B/H/S
-def : InstRW<[N2Write_6cyc_3L01_3V], (instregex "ST4i(8|16|32)$")>;
-def : InstRW<[WriteAdr, N2Write_6cyc_3L01_3V], (instregex "ST4i(8|16|32)_POST$")>;
+def : InstRW<[N2Write_6c_3L01_3V], (instregex "ST4i(8|16|32)$")>;
+def : InstRW<[WriteAdr, N2Write_6c_3L01_3V], (instregex "ST4i(8|16|32)_POST$")>;

// ASIMD store, 4 element, one lane, D
-def : InstRW<[N2Write_4cyc_3L01_3V], (instregex "ST4i(64)$")>;
-def : InstRW<[WriteAdr, N2Write_4cyc_3L01_3V], (instregex "ST4i(64)_POST$")>;
+def : InstRW<[N2Write_4c_3L01_3V], (instregex "ST4i(64)$")>;
+def : InstRW<[WriteAdr, N2Write_4c_3L01_3V], (instregex "ST4i(64)_POST$")>;

// Cryptography extensions
// -----------------------------------------------------------------------------

// Crypto AES ops
-def : InstRW<[N2Write_2cyc_1V], (instregex "^AES[DE]rr$", "^AESI?MCrr")>;
+def : InstRW<[N2Write_2c_1V], (instregex "^AES[DE]rr$", "^AESI?MCrr")>;

// Crypto polynomial (64x64) multiply long
-def : InstRW<[N2Write_2cyc_1V0], (instrs PMULLv1i64, PMULLv2i64)>;
+def : InstRW<[N2Write_2c_1V0], (instrs PMULLv1i64, PMULLv2i64)>;

// Crypto SHA1 hash acceleration op
// Crypto SHA1 schedule acceleration ops
-def : InstRW<[N2Write_2cyc_1V0], (instregex "^SHA1(H|SU0|SU1)")>;
+def : InstRW<[N2Write_2c_1V0], (instregex "^SHA1(H|SU0|SU1)")>;

// Crypto SHA1 hash acceleration ops
// Crypto SHA256 hash acceleration ops
-def : InstRW<[N2Write_4cyc_1V0], (instregex "^SHA1[CMP]", "^SHA256H2?")>;
+def : InstRW<[N2Write_4c_1V0], (instregex "^SHA1[CMP]", "^SHA256H2?")>;

// Crypto SHA256 schedule acceleration ops
-def : InstRW<[N2Write_2cyc_1V0], (instregex "^SHA256SU[01]")>;
+def : InstRW<[N2Write_2c_1V0], (instregex "^SHA256SU[01]")>;

// Crypto SHA512 hash acceleration ops
-def : InstRW<[N2Write_2cyc_1V0], (instregex "^SHA512(H|H2|SU0|SU1)")>;
+def : InstRW<[N2Write_2c_1V0], (instregex "^SHA512(H|H2|SU0|SU1)")>;

// Crypto SHA3 ops
-def : InstRW<[N2Write_2cyc_1V0], (instrs BCAX, EOR3, RAX1, XAR)>;
+def : InstRW<[N2Write_2c_1V0], (instrs BCAX, EOR3, RAX1, XAR)>;

// Crypto SM3 ops
-def : InstRW<[N2Write_2cyc_1V0], (instregex "^SM3PARTW[12]$", "^SM3SS1$",
-                                            "^SM3TT[12][AB]$")>;
+def : InstRW<[N2Write_2c_1V0], (instregex "^SM3PARTW[12]$", "^SM3SS1$",
+                                          "^SM3TT[12][AB]$")>;

// Crypto SM4 ops
-def : InstRW<[N2Write_4cyc_1V0], (instrs SM4E, SM4ENCKEY)>;
+def : InstRW<[N2Write_4c_1V0], (instrs SM4E, SM4ENCKEY)>;

// CRC
// -----------------------------------------------------------------------------
-def : InstRW<[N2Write_2cyc_1M0], (instregex "^CRC32")>;
+def : InstRW<[N2Write_2c_1M0], (instregex "^CRC32")>;
"^WHILE(GE|GT|HI|HS|LE|LO|LS|LT)_P(WW|XX)_[BHSD]$")>; -def : InstRW<[N2Write_3cyc_1M], (instregex "^WHILE(RW|WR)_PXX_[BHSD]$")>; +def : InstRW<[N2Write_3c_1M], (instregex "^WHILE(RW|WR)_PXX_[BHSD]$")>; // Loop terminate -def : InstRW<[N2Write_1cyc_1M], (instregex "^CTERM(EQ|NE)_(WW|XX)$")>; +def : InstRW<[N2Write_1c_1M], (instregex "^CTERM(EQ|NE)_(WW|XX)$")>; // Predicate counting scalar -def : InstRW<[N2Write_2cyc_1M], (instrs ADDPL_XXI, ADDVL_XXI, RDVLI_XI)>; -def : InstRW<[N2Write_2cyc_1M], +def : InstRW<[N2Write_2c_1M], (instrs ADDPL_XXI, ADDVL_XXI, RDVLI_XI)>; +def : InstRW<[N2Write_2c_1M], (instregex "^(CNT|DEC|INC|SQDEC|SQINC|UQDEC|UQINC)[BHWD]_XPiI$", "^SQ(DEC|INC)[BHWD]_XPiWdI$", "^(UQDEC|UQINC)[BHWD]_WPiI$")>; // Predicate counting scalar, active predicate -def : InstRW<[N2Write_2cyc_1M], +def : InstRW<[N2Write_2c_1M], (instregex "^CNTP_XPP_[BHSD]$", "^(DEC|INC|SQDEC|SQINC|UQDEC|UQINC)P_XP_[BHSD]$", "^(UQDEC|UQINC)P_WP_[BHSD]$", "^(SQDEC|SQINC|UQDEC|UQINC)P_XPWd_[BHSD]$")>; // Predicate counting vector, active predicate -def : InstRW<[N2Write_7cyc_1M_1M0_1V], +def : InstRW<[N2Write_7c_1M_1M0_1V], (instregex "^(DEC|INC|SQDEC|SQINC|UQDEC|UQINC)P_ZP_[HSD]$")>; // Predicate logical -def : InstRW<[N2Write_1cyc_1M0], +def : InstRW<[N2Write_1c_1M0], (instregex "^(AND|BIC|EOR|NAND|NOR|ORN|ORR)_PPzPP$")>; // Predicate logical, flag setting -def : InstRW<[N2Write_2cyc_1M0_1M], +def : InstRW<[N2Write_2c_1M0_1M], (instregex "^(ANDS|BICS|EORS|NANDS|NORS|ORNS|ORRS)_PPzPP$")>; // Predicate reverse -def : InstRW<[N2Write_2cyc_1M], (instregex "^REV_PP_[BHSD]$")>; +def : InstRW<[N2Write_2c_1M], (instregex "^REV_PP_[BHSD]$")>; // Predicate select -def : InstRW<[N2Write_1cyc_1M0], (instrs SEL_PPPP)>; +def : InstRW<[N2Write_1c_1M0], (instrs SEL_PPPP)>; // Predicate set -def : InstRW<[N2Write_2cyc_1M], (instregex "^PFALSE$", "^PTRUE_[BHSD]$")>; +def : InstRW<[N2Write_2c_1M], (instregex "^PFALSE$", "^PTRUE_[BHSD]$")>; // Predicate set/initialize, set flags -def : InstRW<[N2Write_3cyc_1M], (instregex "^PTRUES_[BHSD]$")>; +def : InstRW<[N2Write_3c_1M], (instregex "^PTRUES_[BHSD]$")>; // Predicate find first/next -def : InstRW<[N2Write_3cyc_1M], (instregex "^PFIRST_B$", "^PNEXT_[BHSD]$")>; +def : InstRW<[N2Write_3c_1M], (instregex "^PFIRST_B$", "^PNEXT_[BHSD]$")>; // Predicate test -def : InstRW<[N2Write_1cyc_1M], (instrs PTEST_PP)>; +def : InstRW<[N2Write_1c_1M], (instrs PTEST_PP)>; // Predicate transpose -def : InstRW<[N2Write_2cyc_1M], (instregex "^TRN[12]_PPP_[BHSDQ]$")>; +def : InstRW<[N2Write_2c_1M], (instregex "^TRN[12]_PPP_[BHSDQ]$")>; // Predicate unpack and widen -def : InstRW<[N2Write_2cyc_1M], (instrs PUNPKHI_PP, PUNPKLO_PP)>; +def : InstRW<[N2Write_2c_1M], (instrs PUNPKHI_PP, PUNPKLO_PP)>; // Predicate zip/unzip -def : InstRW<[N2Write_2cyc_1M], (instregex "^(ZIP|UZP)[12]_PPP_[BHSDQ]$")>; +def : InstRW<[N2Write_2c_1M], (instregex "^(ZIP|UZP)[12]_PPP_[BHSDQ]$")>; // SVE integer instructions // ----------------------------------------------------------------------------- // Arithmetic, absolute diff -def : InstRW<[N2Write_2cyc_1V], (instregex "^[SU]ABD_ZPmZ_[BHSD]", - "^[SU]ABD_ZPZZ_[BHSD]")>; +def : InstRW<[N2Write_2c_1V], (instregex "^[SU]ABD_ZPmZ_[BHSD]", + "^[SU]ABD_ZPZZ_[BHSD]")>; // Arithmetic, absolute diff accum -def : InstRW<[N2Write_4cyc_1V1], (instregex "^[SU]ABA_ZZZ_[BHSD]$")>; +def : InstRW<[N2Write_4c_1V1], (instregex "^[SU]ABA_ZZZ_[BHSD]$")>; // Arithmetic, absolute diff accum long -def : InstRW<[N2Write_4cyc_1V1], (instregex "^[SU]ABAL[TB]_ZZZ_[HSD]$")>; +def : 
InstRW<[N2Write_4c_1V1], (instregex "^[SU]ABAL[TB]_ZZZ_[HSD]$")>; // Arithmetic, absolute diff long -def : InstRW<[N2Write_2cyc_1V], (instregex "^[SU]ABDL[TB]_ZZZ_[HSD]$")>; +def : InstRW<[N2Write_2c_1V], (instregex "^[SU]ABDL[TB]_ZZZ_[HSD]$")>; // Arithmetic, basic -def : InstRW<[N2Write_2cyc_1V], +def : InstRW<[N2Write_2c_1V], (instregex "^(ABS|ADD|CNOT|NEG|SUB|SUBR)_ZPmZ_[BHSD]", "^(ADD|SUB)_ZZZ_[BHSD]", "^(ADD|SUB|SUBR)_ZPZZ_[BHSD]", @@ -1603,7 +1603,7 @@ def : InstRW<[N2Write_2cyc_1V], "^SSUBL(BT|TB)_ZZZ_[HSD]")>; // Arithmetic, complex -def : InstRW<[N2Write_2cyc_1V], +def : InstRW<[N2Write_2c_1V], (instregex "^R?(ADD|SUB)HN[BT]_ZZZ_[BHS]", "^SQ(ABS|ADD|NEG|SUB|SUBR)_ZPmZ_[BHSD]", "^[SU]Q(ADD|SUB)_ZZZ_[BHSD]", @@ -1612,16 +1612,16 @@ def : InstRW<[N2Write_2cyc_1V], "^(UQSUB|UQSUBR)_ZPmZ_[BHSD]")>; // Arithmetic, large integer -def : InstRW<[N2Write_2cyc_1V], (instregex "^(AD|SB)CL[BT]_ZZZ_[SD]$")>; +def : InstRW<[N2Write_2c_1V], (instregex "^(AD|SB)CL[BT]_ZZZ_[SD]$")>; // Arithmetic, pairwise add -def : InstRW<[N2Write_2cyc_1V], (instregex "^ADDP_ZPmZ_[BHSD]$")>; +def : InstRW<[N2Write_2c_1V], (instregex "^ADDP_ZPmZ_[BHSD]$")>; // Arithmetic, pairwise add and accum long -def : InstRW<[N2Write_4cyc_1V1], (instregex "^[SU]ADALP_ZPmZ_[HSD]$")>; +def : InstRW<[N2Write_4c_1V1], (instregex "^[SU]ADALP_ZPmZ_[HSD]$")>; // Arithmetic, shift -def : InstRW<[N2Write_2cyc_1V1], +def : InstRW<[N2Write_2c_1V1], (instregex "^(ASR|LSL|LSR)_WIDE_ZPmZ_[BHS]", "^(ASR|LSL|LSR)_WIDE_ZZZ_[BHS]", "^(ASR|LSL|LSR)_ZPmI_[BHSD]", @@ -1631,16 +1631,16 @@ def : InstRW<[N2Write_2cyc_1V1], "^(ASRR|LSLR|LSRR)_ZPmZ_[BHSD]")>; // Arithmetic, shift and accumulate -def : InstRW<[N2Write_4cyc_1V1], +def : InstRW<[N2Write_4c_1V1], (instregex "^(SRSRA|SSRA|URSRA|USRA)_ZZI_[BHSD]$")>; // Arithmetic, shift by immediate // Arithmetic, shift by immediate and insert -def : InstRW<[N2Write_2cyc_1V1], +def : InstRW<[N2Write_2c_1V1], (instregex "^(SHRNB|SHRNT|SSHLLB|SSHLLT|USHLLB|USHLLT|SLI|SRI)_ZZI_[BHSD]$")>; // Arithmetic, shift complex -def : InstRW<[N2Write_4cyc_1V1], +def : InstRW<[N2Write_4c_1V1], (instregex "^(SQ)?RSHRU?N[BT]_ZZI_[BHS]", "^(SQRSHL|SQRSHLR|SQSHL|SQSHLR|UQRSHL|UQRSHLR|UQSHL|UQSHLR)_ZPmZ_[BHSD]", "^[SU]QR?SHL_ZPZZ_[BHSD]", @@ -1649,135 +1649,135 @@ def : InstRW<[N2Write_4cyc_1V1], "^UQR?SHRN[BT]_ZZI_[BHS]")>; // Arithmetic, shift right for divide -def : InstRW<[N2Write_4cyc_1V1], (instregex "^ASRD_(ZPmI|ZPZI)_[BHSD]")>; +def : InstRW<[N2Write_4c_1V1], (instregex "^ASRD_(ZPmI|ZPZI)_[BHSD]")>; // Arithmetic, shift rounding -def : InstRW<[N2Write_4cyc_1V1], (instregex "^[SU]RSHLR?_ZPmZ_[BHSD]", - "^[SU]RSHL_ZPZZ_[BHSD]", - "^[SU]RSHR_(ZPmI|ZPZI)_[BHSD]")>; +def : InstRW<[N2Write_4c_1V1], (instregex "^[SU]RSHLR?_ZPmZ_[BHSD]", + "^[SU]RSHL_ZPZZ_[BHSD]", + "^[SU]RSHR_(ZPmI|ZPZI)_[BHSD]")>; // Bit manipulation -def : InstRW<[N2Write_6cyc_2V1], (instregex "^(BDEP|BEXT|BGRP)_ZZZ_[BHSD]")>; +def : InstRW<[N2Write_6c_2V1], (instregex "^(BDEP|BEXT|BGRP)_ZZZ_[BHSD]")>; // Bitwise select -def : InstRW<[N2Write_2cyc_1V], (instregex "^(BSL|BSL1N|BSL2N|NBSL)_ZZZZ$")>; +def : InstRW<[N2Write_2c_1V], (instregex "^(BSL|BSL1N|BSL2N|NBSL)_ZZZZ$")>; // Count/reverse bits -def : InstRW<[N2Write_2cyc_1V], (instregex "^(CLS|CLZ|CNT|RBIT)_ZPmZ_[BHSD]")>; +def : InstRW<[N2Write_2c_1V], (instregex "^(CLS|CLZ|CNT|RBIT)_ZPmZ_[BHSD]")>; // Broadcast logical bitmask immediate to vector -def : InstRW<[N2Write_2cyc_1V], (instrs DUPM_ZI)>; +def : InstRW<[N2Write_2c_1V], (instrs DUPM_ZI)>; // Compare and set flags -def : 
InstRW<[N2Write_4cyc_1V0_1M], +def : InstRW<[N2Write_4c_1V0_1M], (instregex "^CMP(EQ|GE|GT|HI|HS|LE|LO|LS|LT|NE)_PPzZ[IZ]_[BHSD]$", "^CMP(EQ|GE|GT|HI|HS|LE|LO|LS|LT|NE)_WIDE_PPzZZ_[BHS]$")>; // Complex add -def : InstRW<[N2Write_2cyc_1V], (instregex "^(SQ)?CADD_ZZI_[BHSD]$")>; +def : InstRW<[N2Write_2c_1V], (instregex "^(SQ)?CADD_ZZI_[BHSD]$")>; // Complex dot product 8-bit element -def : InstRW<[N2Write_3cyc_1V], (instrs CDOT_ZZZ_S, CDOT_ZZZI_S)>; +def : InstRW<[N2Write_3c_1V], (instrs CDOT_ZZZ_S, CDOT_ZZZI_S)>; // Complex dot product 16-bit element -def : InstRW<[N2Write_4cyc_1V0], (instrs CDOT_ZZZ_D, CDOT_ZZZI_D)>; +def : InstRW<[N2Write_4c_1V0], (instrs CDOT_ZZZ_D, CDOT_ZZZI_D)>; // Complex multiply-add B, H, S element size -def : InstRW<[N2Write_4cyc_1V0], (instregex "^CMLA_ZZZ_[BHS]$", - "^CMLA_ZZZI_[HS]$")>; +def : InstRW<[N2Write_4c_1V0], (instregex "^CMLA_ZZZ_[BHS]$", + "^CMLA_ZZZI_[HS]$")>; // Complex multiply-add D element size -def : InstRW<[N2Write_5cyc_2V0], (instrs CMLA_ZZZ_D)>; +def : InstRW<[N2Write_5c_2V0], (instrs CMLA_ZZZ_D)>; // Conditional extract operations, scalar form -def : InstRW<[N2Write_8cyc_1M0_1V1_1V], (instregex "^CLAST[AB]_RPZ_[BHSD]$")>; +def : InstRW<[N2Write_8c_1M0_1V1_1V], (instregex "^CLAST[AB]_RPZ_[BHSD]$")>; // Conditional extract operations, SIMD&FP scalar and vector forms -def : InstRW<[N2Write_3cyc_1V1], (instregex "^CLAST[AB]_[VZ]PZ_[BHSD]$", - "^COMPACT_ZPZ_[SD]$", - "^SPLICE_ZPZZ?_[BHSD]$")>; +def : InstRW<[N2Write_3c_1V1], (instregex "^CLAST[AB]_[VZ]PZ_[BHSD]$", + "^COMPACT_ZPZ_[SD]$", + "^SPLICE_ZPZZ?_[BHSD]$")>; // Convert to floating point, 64b to float or convert to double -def : InstRW<[N2Write_3cyc_1V0], (instregex "^[SU]CVTF_ZPmZ_Dto[HSD]", - "^[SU]CVTF_ZPmZ_StoD")>; +def : InstRW<[N2Write_3c_1V0], (instregex "^[SU]CVTF_ZPmZ_Dto[HSD]", + "^[SU]CVTF_ZPmZ_StoD")>; // Convert to floating point, 32b to single or half -def : InstRW<[N2Write_4cyc_2V0], (instregex "^[SU]CVTF_ZPmZ_Sto[HS]")>; +def : InstRW<[N2Write_4c_2V0], (instregex "^[SU]CVTF_ZPmZ_Sto[HS]")>; // Convert to floating point, 16b to half -def : InstRW<[N2Write_6cyc_4V0], (instregex "^[SU]CVTF_ZPmZ_HtoH")>; +def : InstRW<[N2Write_6c_4V0], (instregex "^[SU]CVTF_ZPmZ_HtoH")>; // Copy, scalar -def : InstRW<[N2Write_5cyc_1M0_1V], (instregex "^CPY_ZPmR_[BHSD]$")>; +def : InstRW<[N2Write_5c_1M0_1V], (instregex "^CPY_ZPmR_[BHSD]$")>; // Copy, scalar SIMD&FP or imm -def : InstRW<[N2Write_2cyc_1V], (instregex "^CPY_ZPm[IV]_[BHSD]$", - "^CPY_ZPzI_[BHSD]$")>; +def : InstRW<[N2Write_2c_1V], (instregex "^CPY_ZPm[IV]_[BHSD]$", + "^CPY_ZPzI_[BHSD]$")>; // Divides, 32 bit -def : InstRW<[N2Write_12cyc_1V0], (instregex "^[SU]DIVR?_ZPmZ_S", - "^[SU]DIV_ZPZZ_S")>; +def : InstRW<[N2Write_12c_1V0], (instregex "^[SU]DIVR?_ZPmZ_S", + "^[SU]DIV_ZPZZ_S")>; // Divides, 64 bit -def : InstRW<[N2Write_20cyc_1V0], (instregex "^[SU]DIVR?_ZPmZ_D", - "^[SU]DIV_ZPZZ_D")>; +def : InstRW<[N2Write_20c_1V0], (instregex "^[SU]DIVR?_ZPmZ_D", + "^[SU]DIV_ZPZZ_D")>; // Dot product, 8 bit -def : InstRW<[N2Write_3cyc_1V], (instregex "^[SU]DOT_ZZZI?_S$")>; +def : InstRW<[N2Write_3c_1V], (instregex "^[SU]DOT_ZZZI?_S$")>; // Dot product, 8 bit, using signed and unsigned integers -def : InstRW<[N2Write_3cyc_1V], (instrs SUDOT_ZZZI, USDOT_ZZZI, USDOT_ZZZ)>; +def : InstRW<[N2Write_3c_1V], (instrs SUDOT_ZZZI, USDOT_ZZZI, USDOT_ZZZ)>; // Dot product, 16 bit -def : InstRW<[N2Write_4cyc_1V0], (instregex "^[SU]DOT_ZZZI?_D$")>; +def : InstRW<[N2Write_4c_1V0], (instregex "^[SU]DOT_ZZZI?_D$")>; // Duplicate, immediate and indexed form 
-def : InstRW<[N2Write_2cyc_1V], (instregex "^DUP_ZI_[BHSD]$", - "^DUP_ZZI_[BHSDQ]$")>; +def : InstRW<[N2Write_2c_1V], (instregex "^DUP_ZI_[BHSD]$", + "^DUP_ZZI_[BHSDQ]$")>; // Duplicate, scalar form -def : InstRW<[N2Write_3cyc_1M0], (instregex "^DUP_ZR_[BHSD]$")>; +def : InstRW<[N2Write_3c_1M0], (instregex "^DUP_ZR_[BHSD]$")>; // Extend, sign or zero -def : InstRW<[N2Write_2cyc_1V1], (instregex "^[SU]XTB_ZPmZ_[HSD]", - "^[SU]XTH_ZPmZ_[SD]", - "^[SU]XTW_ZPmZ_[D]")>; +def : InstRW<[N2Write_2c_1V1], (instregex "^[SU]XTB_ZPmZ_[HSD]", + "^[SU]XTH_ZPmZ_[SD]", + "^[SU]XTW_ZPmZ_[D]")>; // Extract -def : InstRW<[N2Write_2cyc_1V], (instrs EXT_ZZI, EXT_ZZI_B)>; +def : InstRW<[N2Write_2c_1V], (instrs EXT_ZZI, EXT_ZZI_B)>; // Extract narrow saturating -def : InstRW<[N2Write_4cyc_1V1], (instregex "^[SU]QXTN[BT]_ZZ_[BHS]$", - "^SQXTUN[BT]_ZZ_[BHS]$")>; +def : InstRW<[N2Write_4c_1V1], (instregex "^[SU]QXTN[BT]_ZZ_[BHS]$", + "^SQXTUN[BT]_ZZ_[BHS]$")>; // Extract/insert operation, SIMD and FP scalar form -def : InstRW<[N2Write_3cyc_1V1], (instregex "^LAST[AB]_VPZ_[BHSD]$", - "^INSR_ZV_[BHSD]$")>; +def : InstRW<[N2Write_3c_1V1], (instregex "^LAST[AB]_VPZ_[BHSD]$", + "^INSR_ZV_[BHSD]$")>; // Extract/insert operation, scalar -def : InstRW<[N2Write_5cyc_1V1_1M0], (instregex "^LAST[AB]_RPZ_[BHSD]$", - "^INSR_ZR_[BHSD]$")>; +def : InstRW<[N2Write_5c_1V1_1M0], (instregex "^LAST[AB]_RPZ_[BHSD]$", + "^INSR_ZR_[BHSD]$")>; // Histogram operations -def : InstRW<[N2Write_2cyc_1V], (instregex "^HISTCNT_ZPzZZ_[SD]$", - "^HISTSEG_ZZZ$")>; +def : InstRW<[N2Write_2c_1V], (instregex "^HISTCNT_ZPzZZ_[SD]$", + "^HISTSEG_ZZZ$")>; // Horizontal operations, B, H, S form, immediate operands only -def : InstRW<[N2Write_4cyc_1V0], (instregex "^INDEX_II_[BHS]$")>; +def : InstRW<[N2Write_4c_1V0], (instregex "^INDEX_II_[BHS]$")>; // Horizontal operations, B, H, S form, scalar, immediate operands/ scalar // operands only / immediate, scalar operands -def : InstRW<[N2Write_7cyc_1M0_1V0], (instregex "^INDEX_(IR|RI|RR)_[BHS]$")>; +def : InstRW<[N2Write_7c_1M0_1V0], (instregex "^INDEX_(IR|RI|RR)_[BHS]$")>; // Horizontal operations, D form, immediate operands only -def : InstRW<[N2Write_5cyc_2V0], (instrs INDEX_II_D)>; +def : InstRW<[N2Write_5c_2V0], (instrs INDEX_II_D)>; // Horizontal operations, D form, scalar, immediate operands)/ scalar operands // only / immediate, scalar operands -def : InstRW<[N2Write_8cyc_2M0_2V0], (instregex "^INDEX_(IR|RI|RR)_D$")>; +def : InstRW<[N2Write_8c_2M0_2V0], (instregex "^INDEX_(IR|RI|RR)_D$")>; // Logical -def : InstRW<[N2Write_2cyc_1V], +def : InstRW<[N2Write_2c_1V], (instregex "^(AND|EOR|ORR)_ZI", "^(AND|BIC|EOR|ORR)_ZZZ", "^EOR(BT|TB)_ZZZ_[BHSD]", @@ -1785,527 +1785,527 @@ def : InstRW<[N2Write_2cyc_1V], "^NOT_ZPmZ_[BHSD]")>; // Max/min, basic and pairwise -def : InstRW<[N2Write_2cyc_1V], (instregex "^[SU](MAX|MIN)_ZI_[BHSD]", - "^[SU](MAX|MIN)P?_ZPmZ_[BHSD]", - "^[SU](MAX|MIN)_ZPZZ_[BHSD]")>; +def : InstRW<[N2Write_2c_1V], (instregex "^[SU](MAX|MIN)_ZI_[BHSD]", + "^[SU](MAX|MIN)P?_ZPmZ_[BHSD]", + "^[SU](MAX|MIN)_ZPZZ_[BHSD]")>; // Matching operations -def : InstRW<[N2Write_2cyc_1V0_1M], (instregex "^N?MATCH_PPzZZ_[BH]$")>; +def : InstRW<[N2Write_2c_1V0_1M], (instregex "^N?MATCH_PPzZZ_[BH]$")>; // Matrix multiply-accumulate -def : InstRW<[N2Write_3cyc_1V], (instrs SMMLA_ZZZ, UMMLA_ZZZ, USMMLA_ZZZ)>; +def : InstRW<[N2Write_3c_1V], (instrs SMMLA_ZZZ, UMMLA_ZZZ, USMMLA_ZZZ)>; // Move prefix -def : InstRW<[N2Write_2cyc_1V], (instregex "^MOVPRFX_ZP[mz]Z_[BHSD]$", - "^MOVPRFX_ZZ$")>; +def : 
InstRW<[N2Write_2c_1V], (instregex "^MOVPRFX_ZP[mz]Z_[BHSD]$", + "^MOVPRFX_ZZ$")>; // Multiply, B, H, S element size -def : InstRW<[N2Write_4cyc_1V0], (instregex "^MUL_(ZI|ZPmZ|ZZZI|ZZZ)_[BHS]", - "^MUL_ZPZZ_[BHS]", - "^[SU]MULH_(ZPmZ|ZZZ)_[BHS]", - "^[SU]MULH_ZPZZ_[BHS]")>; +def : InstRW<[N2Write_4c_1V0], (instregex "^MUL_(ZI|ZPmZ|ZZZI|ZZZ)_[BHS]", + "^MUL_ZPZZ_[BHS]", + "^[SU]MULH_(ZPmZ|ZZZ)_[BHS]", + "^[SU]MULH_ZPZZ_[BHS]")>; // Multiply, D element size -def : InstRW<[N2Write_5cyc_2V0], (instregex "^MUL_(ZI|ZPmZ|ZZZI|ZZZ)_D", - "^MUL_ZPZZ_D", - "^[SU]MULH_(ZPmZ|ZZZ)_D", - "^[SU]MULH_ZPZZ_D")>; +def : InstRW<[N2Write_5c_2V0], (instregex "^MUL_(ZI|ZPmZ|ZZZI|ZZZ)_D", + "^MUL_ZPZZ_D", + "^[SU]MULH_(ZPmZ|ZZZ)_D", + "^[SU]MULH_ZPZZ_D")>; // Multiply long -def : InstRW<[N2Write_4cyc_1V0], (instregex "^[SU]MULL[BT]_ZZZI_[SD]$", - "^[SU]MULL[BT]_ZZZ_[HSD]$")>; +def : InstRW<[N2Write_4c_1V0], (instregex "^[SU]MULL[BT]_ZZZI_[SD]$", + "^[SU]MULL[BT]_ZZZ_[HSD]$")>; // Multiply accumulate, B, H, S element size -def : InstRW<[N2Write_4cyc_1V0], (instregex "^ML[AS]_ZZZI_[BHS]$", - "^(ML[AS]|MAD|MSB)_(ZPmZZ|ZPZZZ)_[BHS]")>; +def : InstRW<[N2Write_4c_1V0], (instregex "^ML[AS]_ZZZI_[BHS]$", + "^(ML[AS]|MAD|MSB)_(ZPmZZ|ZPZZZ)_[BHS]")>; // Multiply accumulate, D element size -def : InstRW<[N2Write_5cyc_2V0], (instregex "^ML[AS]_ZZZI_D$", - "^(ML[AS]|MAD|MSB)_(ZPmZZ|ZPZZZ)_D")>; +def : InstRW<[N2Write_5c_2V0], (instregex "^ML[AS]_ZZZI_D$", + "^(ML[AS]|MAD|MSB)_(ZPmZZ|ZPZZZ)_D")>; // Multiply accumulate long -def : InstRW<[N2Write_4cyc_1V0], (instregex "^[SU]ML[AS]L[BT]_ZZZ_[HSD]$", - "^[SU]ML[AS]L[BT]_ZZZI_[SD]$")>; +def : InstRW<[N2Write_4c_1V0], (instregex "^[SU]ML[AS]L[BT]_ZZZ_[HSD]$", + "^[SU]ML[AS]L[BT]_ZZZI_[SD]$")>; // Multiply accumulate saturating doubling long regular -def : InstRW<[N2Write_4cyc_1V0], (instregex "^SQDML[AS](LB|LT|LBT)_ZZZ_[HSD]$", - "^SQDML[AS](LB|LT)_ZZZI_[SD]$")>; +def : InstRW<[N2Write_4c_1V0], (instregex "^SQDML[AS](LB|LT|LBT)_ZZZ_[HSD]$", + "^SQDML[AS](LB|LT)_ZZZI_[SD]$")>; // Multiply saturating doubling high, B, H, S element size -def : InstRW<[N2Write_4cyc_1V0], (instregex "^SQDMULH_ZZZ_[BHS]$", - "^SQDMULH_ZZZI_[HS]$")>; +def : InstRW<[N2Write_4c_1V0], (instregex "^SQDMULH_ZZZ_[BHS]$", + "^SQDMULH_ZZZI_[HS]$")>; // Multiply saturating doubling high, D element size -def : InstRW<[N2Write_5cyc_2V0], (instrs SQDMULH_ZZZ_D, SQDMULH_ZZZI_D)>; +def : InstRW<[N2Write_5c_2V0], (instrs SQDMULH_ZZZ_D, SQDMULH_ZZZI_D)>; // Multiply saturating doubling long -def : InstRW<[N2Write_4cyc_1V0], (instregex "^SQDMULL[BT]_ZZZ_[HSD]$", - "^SQDMULL[BT]_ZZZI_[SD]$")>; +def : InstRW<[N2Write_4c_1V0], (instregex "^SQDMULL[BT]_ZZZ_[HSD]$", + "^SQDMULL[BT]_ZZZI_[SD]$")>; // Multiply saturating rounding doubling regular/complex accumulate, B, H, S // element size -def : InstRW<[N2Write_4cyc_1V0], (instregex "^SQRDML[AS]H_ZZZ_[BHS]$", - "^SQRDCMLAH_ZZZ_[BHS]$", - "^SQRDML[AS]H_ZZZI_[HS]$", - "^SQRDCMLAH_ZZZI_[HS]$")>; +def : InstRW<[N2Write_4c_1V0], (instregex "^SQRDML[AS]H_ZZZ_[BHS]$", + "^SQRDCMLAH_ZZZ_[BHS]$", + "^SQRDML[AS]H_ZZZI_[HS]$", + "^SQRDCMLAH_ZZZI_[HS]$")>; // Multiply saturating rounding doubling regular/complex accumulate, D element // size -def : InstRW<[N2Write_5cyc_2V0], (instregex "^SQRDML[AS]H_ZZZI?_D$", - "^SQRDCMLAH_ZZZ_D$")>; +def : InstRW<[N2Write_5c_2V0], (instregex "^SQRDML[AS]H_ZZZI?_D$", + "^SQRDCMLAH_ZZZ_D$")>; // Multiply saturating rounding doubling regular/complex, B, H, S element size -def : InstRW<[N2Write_4cyc_1V0], (instregex "^SQRDMULH_ZZZ_[BHS]$", - 
"^SQRDMULH_ZZZI_[HS]$")>; +def : InstRW<[N2Write_4c_1V0], (instregex "^SQRDMULH_ZZZ_[BHS]$", + "^SQRDMULH_ZZZI_[HS]$")>; // Multiply saturating rounding doubling regular/complex, D element size -def : InstRW<[N2Write_5cyc_2V0], (instregex "^SQRDMULH_ZZZI?_D$")>; +def : InstRW<[N2Write_5c_2V0], (instregex "^SQRDMULH_ZZZI?_D$")>; // Multiply/multiply long, (8x8) polynomial -def : InstRW<[N2Write_2cyc_1V0], (instregex "^PMUL_ZZZ_B$", - "^PMULL[BT]_ZZZ_[HDQ]$")>; +def : InstRW<[N2Write_2c_1V0], (instregex "^PMUL_ZZZ_B$", + "^PMULL[BT]_ZZZ_[HDQ]$")>; // Predicate counting vector -def : InstRW<[N2Write_2cyc_1V0], +def : InstRW<[N2Write_2c_1V0], (instregex "^(DEC|INC|SQDEC|SQINC|UQDEC|UQINC)[HWD]_ZPiI$")>; // Reciprocal estimate -def : InstRW<[N2Write_4cyc_2V0], (instregex "^URECPE_ZPmZ_S", "^URSQRTE_ZPmZ_S")>; +def : InstRW<[N2Write_4c_2V0], (instregex "^URECPE_ZPmZ_S", "^URSQRTE_ZPmZ_S")>; // Reduction, arithmetic, B form -def : InstRW<[N2Write_11cyc_2V_2V1], (instregex "^[SU](ADD|MAX|MIN)V_VPZ_B")>; +def : InstRW<[N2Write_11c_2V_2V1], (instregex "^[SU](ADD|MAX|MIN)V_VPZ_B")>; // Reduction, arithmetic, H form -def : InstRW<[N2Write_9cyc_2V_2V1], (instregex "^[SU](ADD|MAX|MIN)V_VPZ_H")>; +def : InstRW<[N2Write_9c_2V_2V1], (instregex "^[SU](ADD|MAX|MIN)V_VPZ_H")>; // Reduction, arithmetic, S form -def : InstRW<[N2Write_8cyc_2V_2V1], (instregex "^[SU](ADD|MAX|MIN)V_VPZ_S")>; +def : InstRW<[N2Write_8c_2V_2V1], (instregex "^[SU](ADD|MAX|MIN)V_VPZ_S")>; // Reduction, arithmetic, D form -def : InstRW<[N2Write_8cyc_2V_2V1], (instregex "^[SU](ADD|MAX|MIN)V_VPZ_D")>; +def : InstRW<[N2Write_8c_2V_2V1], (instregex "^[SU](ADD|MAX|MIN)V_VPZ_D")>; // Reduction, logical -def : InstRW<[N2Write_6cyc_1V_1V1], (instregex "^(ANDV|EORV|ORV)_VPZ_[BHSD]$")>; +def : InstRW<[N2Write_6c_1V_1V1], (instregex "^(ANDV|EORV|ORV)_VPZ_[BHSD]$")>; // Reverse, vector -def : InstRW<[N2Write_2cyc_1V], (instregex "^REV_ZZ_[BHSD]$", - "^REVB_ZPmZ_[HSD]$", - "^REVH_ZPmZ_[SD]$", - "^REVW_ZPmZ_D$")>; +def : InstRW<[N2Write_2c_1V], (instregex "^REV_ZZ_[BHSD]$", + "^REVB_ZPmZ_[HSD]$", + "^REVH_ZPmZ_[SD]$", + "^REVW_ZPmZ_D$")>; // Select, vector form -def : InstRW<[N2Write_2cyc_1V], (instregex "^SEL_ZPZZ_[BHSD]$")>; +def : InstRW<[N2Write_2c_1V], (instregex "^SEL_ZPZZ_[BHSD]$")>; // Table lookup -def : InstRW<[N2Write_2cyc_1V], (instregex "^TBL_ZZZZ?_[BHSD]$")>; +def : InstRW<[N2Write_2c_1V], (instregex "^TBL_ZZZZ?_[BHSD]$")>; // Table lookup extension -def : InstRW<[N2Write_2cyc_1V], (instregex "^TBX_ZZZ_[BHSD]$")>; +def : InstRW<[N2Write_2c_1V], (instregex "^TBX_ZZZ_[BHSD]$")>; // Transpose, vector form -def : InstRW<[N2Write_2cyc_1V], (instregex "^TRN[12]_ZZZ_[BHSDQ]$")>; +def : InstRW<[N2Write_2c_1V], (instregex "^TRN[12]_ZZZ_[BHSDQ]$")>; // Unpack and extend -def : InstRW<[N2Write_2cyc_1V], (instregex "^[SU]UNPK(HI|LO)_ZZ_[HSD]$")>; +def : InstRW<[N2Write_2c_1V], (instregex "^[SU]UNPK(HI|LO)_ZZ_[HSD]$")>; // Zip/unzip -def : InstRW<[N2Write_2cyc_1V], (instregex "^(UZP|ZIP)[12]_ZZZ_[BHSDQ]$")>; +def : InstRW<[N2Write_2c_1V], (instregex "^(UZP|ZIP)[12]_ZZZ_[BHSDQ]$")>; // SVE floating-point instructions // ----------------------------------------------------------------------------- // Floating point absolute value/difference -def : InstRW<[N2Write_2cyc_1V], (instregex "^FAB[SD]_ZPmZ_[HSD]", - "^FABD_ZPZZ_[HSD]", - "^FABS_ZPmZ_[HSD]")>; +def : InstRW<[N2Write_2c_1V], (instregex "^FAB[SD]_ZPmZ_[HSD]", + "^FABD_ZPZZ_[HSD]", + "^FABS_ZPmZ_[HSD]")>; // Floating point arithmetic -def : InstRW<[N2Write_2cyc_1V], (instregex 
"^F(ADD|SUB)_(ZPm[IZ]|ZZZ)_[HSD]", - "^F(ADD|SUB)_ZPZ[IZ]_[HSD]", - "^FADDP_ZPmZZ_[HSD]", - "^FNEG_ZPmZ_[HSD]", - "^FSUBR_ZPm[IZ]_[HSD]", - "^FSUBR_(ZPZI|ZPZZ)_[HSD]")>; +def : InstRW<[N2Write_2c_1V], (instregex "^F(ADD|SUB)_(ZPm[IZ]|ZZZ)_[HSD]", + "^F(ADD|SUB)_ZPZ[IZ]_[HSD]", + "^FADDP_ZPmZZ_[HSD]", + "^FNEG_ZPmZ_[HSD]", + "^FSUBR_ZPm[IZ]_[HSD]", + "^FSUBR_(ZPZI|ZPZZ)_[HSD]")>; // Floating point associative add, F16 -def : InstRW<[N2Write_10cyc_1V1], (instrs FADDA_VPZ_H)>; +def : InstRW<[N2Write_10c_1V1], (instrs FADDA_VPZ_H)>; // Floating point associative add, F32 -def : InstRW<[N2Write_6cyc_1V1], (instrs FADDA_VPZ_S)>; +def : InstRW<[N2Write_6c_1V1], (instrs FADDA_VPZ_S)>; // Floating point associative add, F64 -def : InstRW<[N2Write_4cyc_1V], (instrs FADDA_VPZ_D)>; +def : InstRW<[N2Write_4c_1V], (instrs FADDA_VPZ_D)>; // Floating point compare -def : InstRW<[N2Write_2cyc_1V0], (instregex "^FACG[ET]_PPzZZ_[HSD]$", - "^FCM(EQ|GE|GT|NE)_PPzZ[0Z]_[HSD]$", - "^FCM(LE|LT)_PPzZ0_[HSD]$", - "^FCMUO_PPzZZ_[HSD]$")>; +def : InstRW<[N2Write_2c_1V0], (instregex "^FACG[ET]_PPzZZ_[HSD]$", + "^FCM(EQ|GE|GT|NE)_PPzZ[0Z]_[HSD]$", + "^FCM(LE|LT)_PPzZ0_[HSD]$", + "^FCMUO_PPzZZ_[HSD]$")>; // Floating point complex add -def : InstRW<[N2Write_3cyc_1V], (instregex "^FCADD_ZPmZ_[HSD]$")>; +def : InstRW<[N2Write_3c_1V], (instregex "^FCADD_ZPmZ_[HSD]$")>; // Floating point complex multiply add -def : InstRW<[N2Write_5cyc_1V], (instregex "^FCMLA_ZPmZZ_[HSD]$", - "^FCMLA_ZZZI_[HS]$")>; +def : InstRW<[N2Write_5c_1V], (instregex "^FCMLA_ZPmZZ_[HSD]$", + "^FCMLA_ZZZI_[HS]$")>; // Floating point convert, long or narrow (F16 to F32 or F32 to F16) -def : InstRW<[N2Write_4cyc_2V0], (instregex "^FCVT_ZPmZ_(HtoS|StoH)", - "^FCVTLT_ZPmZ_HtoS", - "^FCVTNT_ZPmZ_StoH")>; +def : InstRW<[N2Write_4c_2V0], (instregex "^FCVT_ZPmZ_(HtoS|StoH)", + "^FCVTLT_ZPmZ_HtoS", + "^FCVTNT_ZPmZ_StoH")>; // Floating point convert, long or narrow (F16 to F64, F32 to F64, F64 to F32 // or F64 to F16) -def : InstRW<[N2Write_3cyc_1V0], (instregex "^FCVT_ZPmZ_(HtoD|StoD|DtoS|DtoH)", - "^FCVTLT_ZPmZ_StoD", - "^FCVTNT_ZPmZ_DtoS")>; +def : InstRW<[N2Write_3c_1V0], (instregex "^FCVT_ZPmZ_(HtoD|StoD|DtoS|DtoH)", + "^FCVTLT_ZPmZ_StoD", + "^FCVTNT_ZPmZ_DtoS")>; // Floating point convert, round to odd -def : InstRW<[N2Write_3cyc_1V0], (instrs FCVTX_ZPmZ_DtoS, FCVTXNT_ZPmZ_DtoS)>; +def : InstRW<[N2Write_3c_1V0], (instrs FCVTX_ZPmZ_DtoS, FCVTXNT_ZPmZ_DtoS)>; // Floating point base2 log, F16 -def : InstRW<[N2Write_6cyc_4V0], (instregex "^FLOGB_(ZPmZ|ZPZZ)_H")>; +def : InstRW<[N2Write_6c_4V0], (instregex "^FLOGB_(ZPmZ|ZPZZ)_H")>; // Floating point base2 log, F32 -def : InstRW<[N2Write_4cyc_2V0], (instregex "^FLOGB_(ZPmZ|ZPZZ)_S")>; +def : InstRW<[N2Write_4c_2V0], (instregex "^FLOGB_(ZPmZ|ZPZZ)_S")>; // Floating point base2 log, F64 -def : InstRW<[N2Write_3cyc_1V0], (instregex "^FLOGB_(ZPmZ|ZPZZ)_D")>; +def : InstRW<[N2Write_3c_1V0], (instregex "^FLOGB_(ZPmZ|ZPZZ)_D")>; // Floating point convert to integer, F16 -def : InstRW<[N2Write_6cyc_4V0], (instregex "^FCVTZ[SU]_ZPmZ_HtoH")>; +def : InstRW<[N2Write_6c_4V0], (instregex "^FCVTZ[SU]_ZPmZ_HtoH")>; // Floating point convert to integer, F32 -def : InstRW<[N2Write_4cyc_2V0], (instregex "^FCVTZ[SU]_ZPmZ_(HtoS|StoS)")>; +def : InstRW<[N2Write_4c_2V0], (instregex "^FCVTZ[SU]_ZPmZ_(HtoS|StoS)")>; // Floating point convert to integer, F64 -def : InstRW<[N2Write_3cyc_1V0], +def : InstRW<[N2Write_3c_1V0], (instregex "^FCVTZ[SU]_ZPmZ_(HtoD|StoD|DtoS|DtoD)")>; // Floating point copy -def : InstRW<[N2Write_2cyc_1V], 
(instregex "^FCPY_ZPmI_[HSD]$", - "^FDUP_ZI_[HSD]$")>; +def : InstRW<[N2Write_2c_1V], (instregex "^FCPY_ZPmI_[HSD]$", + "^FDUP_ZI_[HSD]$")>; // Floating point divide, F16 -def : InstRW<[N2Write_13cyc_1V0], (instregex "^FDIVR?_(ZPmZ|ZPZZ)_H")>; +def : InstRW<[N2Write_13c_1V0], (instregex "^FDIVR?_(ZPmZ|ZPZZ)_H")>; // Floating point divide, F32 -def : InstRW<[N2Write_10cyc_1V0], (instregex "^FDIVR?_(ZPmZ|ZPZZ)_S")>; +def : InstRW<[N2Write_10c_1V0], (instregex "^FDIVR?_(ZPmZ|ZPZZ)_S")>; // Floating point divide, F64 -def : InstRW<[N2Write_15cyc_1V0], (instregex "^FDIVR?_(ZPmZ|ZPZZ)_D")>; +def : InstRW<[N2Write_15c_1V0], (instregex "^FDIVR?_(ZPmZ|ZPZZ)_D")>; // Floating point min/max pairwise -def : InstRW<[N2Write_2cyc_1V], (instregex "^F(MAX|MIN)(NM)?P_ZPmZZ_[HSD]")>; +def : InstRW<[N2Write_2c_1V], (instregex "^F(MAX|MIN)(NM)?P_ZPmZZ_[HSD]")>; // Floating point min/max -def : InstRW<[N2Write_2cyc_1V], (instregex "^F(MAX|MIN)(NM)?_ZPm[IZ]_[HSD]", - "^F(MAX|MIN)(NM)?_ZPZ[IZ]_[HSD]")>; +def : InstRW<[N2Write_2c_1V], (instregex "^F(MAX|MIN)(NM)?_ZPm[IZ]_[HSD]", + "^F(MAX|MIN)(NM)?_ZPZ[IZ]_[HSD]")>; // Floating point multiply -def : InstRW<[N2Write_3cyc_1V], (instregex "^(FSCALE|FMULX)_ZPmZ_[HSD]", - "^FMULX_ZPZZ_[HSD]", - "^FMUL_(ZPm[IZ]|ZZZI?)_[HSD]", - "^FMUL_ZPZ[IZ]_[HSD]")>; +def : InstRW<[N2Write_3c_1V], (instregex "^(FSCALE|FMULX)_ZPmZ_[HSD]", + "^FMULX_ZPZZ_[HSD]", + "^FMUL_(ZPm[IZ]|ZZZI?)_[HSD]", + "^FMUL_ZPZ[IZ]_[HSD]")>; // Floating point multiply accumulate -def : InstRW<[N2Write_4cyc_1V], (instregex "^F(N?M(AD|SB)|N?ML[AS])_ZPmZZ_[HSD]$", - "^FN?ML[AS]_ZPZZZ_[HSD]", - "^FML[AS]_ZZZI_[HSD]$")>; +def : InstRW<[N2Write_4c_1V], (instregex "^F(N?M(AD|SB)|N?ML[AS])_ZPmZZ_[HSD]$", + "^FN?ML[AS]_ZPZZZ_[HSD]", + "^FML[AS]_ZZZI_[HSD]$")>; // Floating point multiply add/sub accumulate long -def : InstRW<[N2Write_4cyc_1V], (instregex "^FML[AS]L[BT]_ZZZI?_SHH$")>; +def : InstRW<[N2Write_4c_1V], (instregex "^FML[AS]L[BT]_ZZZI?_SHH$")>; // Floating point reciprocal estimate, F16 -def : InstRW<[N2Write_6cyc_4V0], (instregex "^FR(ECP|SQRT)E_ZZ_H", "^FRECPX_ZPmZ_H")>; +def : InstRW<[N2Write_6c_4V0], (instregex "^FR(ECP|SQRT)E_ZZ_H", "^FRECPX_ZPmZ_H")>; // Floating point reciprocal estimate, F32 -def : InstRW<[N2Write_4cyc_2V0], (instregex "^FR(ECP|SQRT)E_ZZ_S", "^FRECPX_ZPmZ_S")>; +def : InstRW<[N2Write_4c_2V0], (instregex "^FR(ECP|SQRT)E_ZZ_S", "^FRECPX_ZPmZ_S")>; // Floating point reciprocal estimate, F64 -def : InstRW<[N2Write_3cyc_1V0], (instregex "^FR(ECP|SQRT)E_ZZ_D", "^FRECPX_ZPmZ_D")>; +def : InstRW<[N2Write_3c_1V0], (instregex "^FR(ECP|SQRT)E_ZZ_D", "^FRECPX_ZPmZ_D")>; // Floating point reciprocal step -def : InstRW<[N2Write_4cyc_1V0], (instregex "^F(RECPS|RSQRTS)_ZZZ_[HSD]$")>; +def : InstRW<[N2Write_4c_1V0], (instregex "^F(RECPS|RSQRTS)_ZZZ_[HSD]$")>; // Floating point reduction, F16 -def : InstRW<[N2Write_6cyc_2V], +def : InstRW<[N2Write_6c_2V], (instregex "^(FADDV|FMAXNMV|FMAXV|FMINNMV|FMINV)_VPZ_H$")>; // Floating point reduction, F32 -def : InstRW<[N2Write_4cyc_1V], +def : InstRW<[N2Write_4c_1V], (instregex "^(FADDV|FMAXNMV|FMAXV|FMINNMV|FMINV)_VPZ_S$")>; // Floating point reduction, F64 -def : InstRW<[N2Write_2cyc_1V], +def : InstRW<[N2Write_2c_1V], (instregex "^(FADDV|FMAXNMV|FMAXV|FMINNMV|FMINV)_VPZ_D$")>; // Floating point round to integral, F16 -def : InstRW<[N2Write_6cyc_4V0], (instregex "^FRINT[AIMNPXZ]_ZPmZ_H")>; +def : InstRW<[N2Write_6c_4V0], (instregex "^FRINT[AIMNPXZ]_ZPmZ_H")>; // Floating point round to integral, F32 -def : InstRW<[N2Write_4cyc_2V0], (instregex 
"^FRINT[AIMNPXZ]_ZPmZ_S")>; +def : InstRW<[N2Write_4c_2V0], (instregex "^FRINT[AIMNPXZ]_ZPmZ_S")>; // Floating point round to integral, F64 -def : InstRW<[N2Write_3cyc_1V0], (instregex "^FRINT[AIMNPXZ]_ZPmZ_D")>; +def : InstRW<[N2Write_3c_1V0], (instregex "^FRINT[AIMNPXZ]_ZPmZ_D")>; // Floating point square root, F16 -def : InstRW<[N2Write_13cyc_1V0], (instregex "^FSQRT_ZPmZ_H")>; +def : InstRW<[N2Write_13c_1V0], (instregex "^FSQRT_ZPmZ_H")>; // Floating point square root, F32 -def : InstRW<[N2Write_10cyc_1V0], (instregex "^FSQRT_ZPmZ_S")>; +def : InstRW<[N2Write_10c_1V0], (instregex "^FSQRT_ZPmZ_S")>; // Floating point square root, F64 -def : InstRW<[N2Write_16cyc_1V0], (instregex "^FSQRT_ZPmZ_D")>; +def : InstRW<[N2Write_16c_1V0], (instregex "^FSQRT_ZPmZ_D")>; // Floating point trigonometric exponentiation -def : InstRW<[N2Write_3cyc_1V1], (instregex "^FEXPA_ZZ_[HSD]$")>; +def : InstRW<[N2Write_3c_1V1], (instregex "^FEXPA_ZZ_[HSD]$")>; // Floating point trigonometric multiply add -def : InstRW<[N2Write_4cyc_1V], (instregex "^FTMAD_ZZI_[HSD]$")>; +def : InstRW<[N2Write_4c_1V], (instregex "^FTMAD_ZZI_[HSD]$")>; // Floating point trigonometric, miscellaneous -def : InstRW<[N2Write_3cyc_1V], (instregex "^FTS(MUL|SEL)_ZZZ_[HSD]$")>; +def : InstRW<[N2Write_3c_1V], (instregex "^FTS(MUL|SEL)_ZZZ_[HSD]$")>; // SVE BFloat16 (BF16) instructions // ----------------------------------------------------------------------------- // Convert, F32 to BF16 -def : InstRW<[N2Write_3cyc_1V0], (instrs BFCVT_ZPmZ, BFCVTNT_ZPmZ)>; +def : InstRW<[N2Write_3c_1V0], (instrs BFCVT_ZPmZ, BFCVTNT_ZPmZ)>; // Dot product -def : InstRW<[N2Write_4cyc_1V], (instrs BFDOT_ZZI, BFDOT_ZZZ)>; +def : InstRW<[N2Write_4c_1V], (instrs BFDOT_ZZI, BFDOT_ZZZ)>; // Matrix multiply accumulate -def : InstRW<[N2Write_5cyc_1V], (instrs BFMMLA_ZZZ)>; +def : InstRW<[N2Write_5c_1V], (instrs BFMMLA_ZZZ)>; // Multiply accumulate long -def : InstRW<[N2Write_4cyc_1V], (instregex "^BFMLAL[BT]_ZZZ(I)?$")>; +def : InstRW<[N2Write_4c_1V], (instregex "^BFMLAL[BT]_ZZZ(I)?$")>; // SVE Load instructions // ----------------------------------------------------------------------------- // Load vector -def : InstRW<[N2Write_6cyc_1L], (instrs LDR_ZXI)>; +def : InstRW<[N2Write_6c_1L], (instrs LDR_ZXI)>; // Load predicate -def : InstRW<[N2Write_6cyc_1L_1M], (instrs LDR_PXI)>; +def : InstRW<[N2Write_6c_1L_1M], (instrs LDR_PXI)>; // Contiguous load, scalar + imm -def : InstRW<[N2Write_6cyc_1L], (instregex "^LD1[BHWD]_IMM$", - "^LD1S?B_[HSD]_IMM$", - "^LD1S?H_[SD]_IMM$", - "^LD1S?W_D_IMM$" )>; +def : InstRW<[N2Write_6c_1L], (instregex "^LD1[BHWD]_IMM$", + "^LD1S?B_[HSD]_IMM$", + "^LD1S?H_[SD]_IMM$", + "^LD1S?W_D_IMM$" )>; // Contiguous load, scalar + scalar -def : InstRW<[N2Write_6cyc_1L01], (instregex "^LD1[BHWD]$", - "^LD1S?B_[HSD]$", - "^LD1S?H_[SD]$", - "^LD1S?W_D$" )>; +def : InstRW<[N2Write_6c_1L01], (instregex "^LD1[BHWD]$", + "^LD1S?B_[HSD]$", + "^LD1S?H_[SD]$", + "^LD1S?W_D$" )>; // Contiguous load broadcast, scalar + imm -def : InstRW<[N2Write_6cyc_1L], (instregex "^LD1R[BHWD]_IMM$", - "^LD1RSW_IMM$", - "^LD1RS?B_[HSD]_IMM$", - "^LD1RS?H_[SD]_IMM$", - "^LD1RS?W_D_IMM$", - "^LD1RQ_[BHWD]_IMM$")>; +def : InstRW<[N2Write_6c_1L], (instregex "^LD1R[BHWD]_IMM$", + "^LD1RSW_IMM$", + "^LD1RS?B_[HSD]_IMM$", + "^LD1RS?H_[SD]_IMM$", + "^LD1RS?W_D_IMM$", + "^LD1RQ_[BHWD]_IMM$")>; // Contiguous load broadcast, scalar + scalar -def : InstRW<[N2Write_6cyc_1L], (instregex "^LD1RQ_[BHWD]$")>; +def : InstRW<[N2Write_6c_1L], (instregex "^LD1RQ_[BHWD]$")>; // Non temporal 
-def : InstRW<[N2Write_6cyc_1L], (instregex "^LDNT1[BHWD]_ZRI$")>;
+def : InstRW<[N2Write_6c_1L], (instregex "^LDNT1[BHWD]_ZRI$")>;
// Non temporal load, scalar + scalar
-def : InstRW<[N2Write_6cyc_1L_1S], (instregex "^LDNT1[BHWD]_ZRR$")>;
+def : InstRW<[N2Write_6c_1L_1S], (instregex "^LDNT1[BHWD]_ZRR$")>;
// Non temporal gather load, vector + scalar 32-bit element size
-def : InstRW<[N2Write_9cyc_1L_1V], (instregex "^LDNT1[BHW]_ZZR_S$",
-    "^LDNT1S[BH]_ZZR_S$")>;
+def : InstRW<[N2Write_9c_1L_1V], (instregex "^LDNT1[BHW]_ZZR_S$",
+    "^LDNT1S[BH]_ZZR_S$")>;
// Non temporal gather load, vector + scalar 64-bit element size
-def : InstRW<[N2Write_10cyc_2L_2V1], (instregex "^LDNT1S?[BHW]_ZZR_D$")>;
-def : InstRW<[N2Write_10cyc_2L_2V1], (instrs LDNT1D_ZZR_D)>;
+def : InstRW<[N2Write_10c_2L_2V1], (instregex "^LDNT1S?[BHW]_ZZR_D$")>;
+def : InstRW<[N2Write_10c_2L_2V1], (instrs LDNT1D_ZZR_D)>;
// Contiguous first faulting load, scalar + scalar
-def : InstRW<[N2Write_6cyc_1L_1S], (instregex "^LDFF1[BHWD]$",
-    "^LDFF1S?B_[HSD]$",
-    "^LDFF1S?H_[SD]$",
-    "^LDFF1S?W_D$")>;
+def : InstRW<[N2Write_6c_1L_1S], (instregex "^LDFF1[BHWD]$",
+    "^LDFF1S?B_[HSD]$",
+    "^LDFF1S?H_[SD]$",
+    "^LDFF1S?W_D$")>;
// Contiguous non faulting load, scalar + imm
-def : InstRW<[N2Write_6cyc_1L], (instregex "^LDNF1[BHWD]_IMM$",
-    "^LDNF1S?B_[HSD]_IMM$",
-    "^LDNF1S?H_[SD]_IMM$",
-    "^LDNF1S?W_D_IMM$")>;
+def : InstRW<[N2Write_6c_1L], (instregex "^LDNF1[BHWD]_IMM$",
+    "^LDNF1S?B_[HSD]_IMM$",
+    "^LDNF1S?H_[SD]_IMM$",
+    "^LDNF1S?W_D_IMM$")>;
// Contiguous Load two structures to two vectors, scalar + imm
-def : InstRW<[N2Write_8cyc_1L_1V], (instregex "^LD2[BHWD]_IMM$")>;
+def : InstRW<[N2Write_8c_1L_1V], (instregex "^LD2[BHWD]_IMM$")>;
// Contiguous Load two structures to two vectors, scalar + scalar
-def : InstRW<[N2Write_9cyc_1L_1V], (instregex "^LD2[BHWD]$")>;
+def : InstRW<[N2Write_9c_1L_1V], (instregex "^LD2[BHWD]$")>;
// Contiguous Load three structures to three vectors, scalar + imm
-def : InstRW<[N2Write_9cyc_1L_1V], (instregex "^LD3[BHWD]_IMM$")>;
+def : InstRW<[N2Write_9c_1L_1V], (instregex "^LD3[BHWD]_IMM$")>;
// Contiguous Load three structures to three vectors, scalar + scalar
-def : InstRW<[N2Write_10cyc_1V_1L_1S], (instregex "^LD3[BHWD]$")>;
+def : InstRW<[N2Write_10c_1V_1L_1S], (instregex "^LD3[BHWD]$")>;
// Contiguous Load four structures to four vectors, scalar + imm
-def : InstRW<[N2Write_9cyc_2L_2V], (instregex "^LD4[BHWD]_IMM$")>;
+def : InstRW<[N2Write_9c_2L_2V], (instregex "^LD4[BHWD]_IMM$")>;
// Contiguous Load four structures to four vectors, scalar + scalar
-def : InstRW<[N2Write_10cyc_2L_2V_2S], (instregex "^LD4[BHWD]$")>;
+def : InstRW<[N2Write_10c_2L_2V_2S], (instregex "^LD4[BHWD]$")>;
// Gather load, vector + imm, 32-bit element size
-def : InstRW<[N2Write_9cyc_1L_1V], (instregex "^GLD(FF)?1S?[BH]_S_IMM$",
-    "^GLD(FF)?1W_IMM$")>;
+def : InstRW<[N2Write_9c_1L_1V], (instregex "^GLD(FF)?1S?[BH]_S_IMM$",
+    "^GLD(FF)?1W_IMM$")>;
// Gather load, vector + imm, 64-bit element size
-def : InstRW<[N2Write_9cyc_2L_2V], (instregex "^GLD(FF)?1S?[BHW]_D_IMM$",
-    "^GLD(FF)?1D_IMM$")>;
+def : InstRW<[N2Write_9c_2L_2V], (instregex "^GLD(FF)?1S?[BHW]_D_IMM$",
+    "^GLD(FF)?1D_IMM$")>;
// Gather load, 64-bit element size
-def : InstRW<[N2Write_9cyc_2L_2V],
+def : InstRW<[N2Write_9c_2L_2V],
     (instregex "^GLD(FF)?1S?[BHW]_D_[SU]XTW(_SCALED)?$",
                "^GLD(FF)?1S?[BHW]_D(_SCALED)?$",
                "^GLD(FF)?1D_[SU]XTW(_SCALED)?$",
                "^GLD(FF)?1D(_SCALED)?$")>;
// Gather load, 32-bit scaled offset
-def : InstRW<[N2Write_10cyc_2L_2V],
+def : InstRW<[N2Write_10c_2L_2V],
     (instregex "^GLD(FF)?1S?[HW]_S_[SU]XTW_SCALED$",
                "^GLD(FF)?1W_[SU]XTW_SCALED")>;
// Gather load, 32-bit unpacked unscaled offset
-def : InstRW<[N2Write_9cyc_1L_1V], (instregex "^GLD(FF)?1S?[BH]_S_[SU]XTW$",
-    "^GLD(FF)?1W_[SU]XTW$")>;
+def : InstRW<[N2Write_9c_1L_1V], (instregex "^GLD(FF)?1S?[BH]_S_[SU]XTW$",
+    "^GLD(FF)?1W_[SU]XTW$")>;

// SVE Store instructions
// -----------------------------------------------------------------------------

// Store from predicate reg
-def : InstRW<[N2Write_1cyc_1L01], (instrs STR_PXI)>;
+def : InstRW<[N2Write_1c_1L01], (instrs STR_PXI)>;
// Store from vector reg
-def : InstRW<[N2Write_2cyc_1L01_1V], (instrs STR_ZXI)>;
+def : InstRW<[N2Write_2c_1L01_1V], (instrs STR_ZXI)>;
// Contiguous store, scalar + imm
-def : InstRW<[N2Write_2cyc_1L01_1V], (instregex "^ST1[BHWD]_IMM$",
-    "^ST1B_[HSD]_IMM$",
-    "^ST1H_[SD]_IMM$",
-    "^ST1W_D_IMM$")>;
+def : InstRW<[N2Write_2c_1L01_1V], (instregex "^ST1[BHWD]_IMM$",
+    "^ST1B_[HSD]_IMM$",
+    "^ST1H_[SD]_IMM$",
+    "^ST1W_D_IMM$")>;
// Contiguous store, scalar + scalar
-def : InstRW<[N2Write_2cyc_1L01_1S_1V], (instregex "^ST1H(_[SD])?$")>;
-def : InstRW<[N2Write_2cyc_1L01_1V], (instregex "^ST1[BWD]$",
-    "^ST1B_[HSD]$",
-    "^ST1W_D$")>;
+def : InstRW<[N2Write_2c_1L01_1S_1V], (instregex "^ST1H(_[SD])?$")>;
+def : InstRW<[N2Write_2c_1L01_1V], (instregex "^ST1[BWD]$",
+    "^ST1B_[HSD]$",
+    "^ST1W_D$")>;
// Contiguous store two structures from two vectors, scalar + imm
-def : InstRW<[N2Write_4cyc_1L01_1V], (instregex "^ST2[BHWD]_IMM$")>;
+def : InstRW<[N2Write_4c_1L01_1V], (instregex "^ST2[BHWD]_IMM$")>;
// Contiguous store two structures from two vectors, scalar + scalar
-def : InstRW<[N2Write_4cyc_1L01_1S_1V], (instrs ST2H)>;
+def : InstRW<[N2Write_4c_1L01_1S_1V], (instrs ST2H)>;
// Contiguous store two structures from two vectors, scalar + scalar
-def : InstRW<[N2Write_4cyc_1L01_1V], (instregex "^ST2[BWD]$")>;
+def : InstRW<[N2Write_4c_1L01_1V], (instregex "^ST2[BWD]$")>;
// Contiguous store three structures from three vectors, scalar + imm
-def : InstRW<[N2Write_7cyc_5L01_5V], (instregex "^ST3[BHWD]_IMM$")>;
+def : InstRW<[N2Write_7c_5L01_5V], (instregex "^ST3[BHWD]_IMM$")>;
// Contiguous store three structures from three vectors, scalar + scalar
-def : InstRW<[N2Write_7cyc_5L01_5S_5V], (instrs ST3H)>;
+def : InstRW<[N2Write_7c_5L01_5S_5V], (instrs ST3H)>;
// Contiguous store three structures from three vectors, scalar + scalar
-def : InstRW<[N2Write_7cyc_5L01_5S_5V], (instregex "^ST3[BWD]$")>;
+def : InstRW<[N2Write_7c_5L01_5S_5V], (instregex "^ST3[BWD]$")>;
// Contiguous store four structures from four vectors, scalar + imm
-def : InstRW<[N2Write_11cyc_9L01_9V], (instregex "^ST4[BHWD]_IMM$")>;
+def : InstRW<[N2Write_11c_9L01_9V], (instregex "^ST4[BHWD]_IMM$")>;
// Contiguous store four structures from four vectors, scalar + scalar
-def : InstRW<[N2Write_11cyc_9L01_9S_9V], (instrs ST4H)>;
+def : InstRW<[N2Write_11c_9L01_9S_9V], (instrs ST4H)>;
// Contiguous store four structures from four vectors, scalar + scalar
-def : InstRW<[N2Write_11cyc_9L01_9S_9V], (instregex "^ST4[BWD]$")>;
+def : InstRW<[N2Write_11c_9L01_9S_9V], (instregex "^ST4[BWD]$")>;
// Non temporal store, scalar + imm
-def : InstRW<[N2Write_2cyc_1L01_1V], (instregex "^STNT1[BHWD]_ZRI$")>;
+def : InstRW<[N2Write_2c_1L01_1V], (instregex "^STNT1[BHWD]_ZRI$")>;
// Non temporal store, scalar + scalar
-def : InstRW<[N2Write_2cyc_1L01_1S_1V], (instrs STNT1H_ZRR)>;
-def : InstRW<[N2Write_2cyc_1L01_1V], (instregex "^STNT1[BWD]_ZRR$")>;
(instregex "^STNT1[BWD]_ZRR$")>; +def : InstRW<[N2Write_2c_1L01_1S_1V], (instrs STNT1H_ZRR)>; +def : InstRW<[N2Write_2c_1L01_1V], (instregex "^STNT1[BWD]_ZRR$")>; // Scatter non temporal store, vector + scalar 32-bit element size -def : InstRW<[N2Write_4cyc_2L01_2V], (instregex "^STNT1[BHW]_ZZR_S")>; +def : InstRW<[N2Write_4c_2L01_2V], (instregex "^STNT1[BHW]_ZZR_S")>; // Scatter non temporal store, vector + scalar 64-bit element size -def : InstRW<[N2Write_2cyc_1L01_1V], (instregex "^STNT1[BHWD]_ZZR_D")>; +def : InstRW<[N2Write_2c_1L01_1V], (instregex "^STNT1[BHWD]_ZZR_D")>; // Scatter store vector + imm 32-bit element size -def : InstRW<[N2Write_4cyc_2L01_2V], (instregex "^SST1[BH]_S_IMM$", - "^SST1W_IMM$")>; +def : InstRW<[N2Write_4c_2L01_2V], (instregex "^SST1[BH]_S_IMM$", + "^SST1W_IMM$")>; // Scatter store vector + imm 64-bit element size -def : InstRW<[N2Write_2cyc_1L01_1V], (instregex "^SST1[BHW]_D_IMM$", - "^SST1D_IMM$")>; +def : InstRW<[N2Write_2c_1L01_1V], (instregex "^SST1[BHW]_D_IMM$", + "^SST1D_IMM$")>; // Scatter store, 32-bit scaled offset -def : InstRW<[N2Write_4cyc_2L01_2V], +def : InstRW<[N2Write_4c_2L01_2V], (instregex "^SST1(H_S|W)_[SU]XTW_SCALED$")>; // Scatter store, 32-bit unpacked unscaled offset -def : InstRW<[N2Write_2cyc_1L01_1V], (instregex "^SST1[BHW]_D_[SU]XTW$", - "^SST1D_[SU]XTW$")>; +def : InstRW<[N2Write_2c_1L01_1V], (instregex "^SST1[BHW]_D_[SU]XTW$", + "^SST1D_[SU]XTW$")>; // Scatter store, 32-bit unpacked scaled offset -def : InstRW<[N2Write_2cyc_1L01_1V], (instregex "^SST1[HW]_D_[SU]XTW_SCALED$", - "^SST1D_[SU]XTW_SCALED$")>; +def : InstRW<[N2Write_2c_1L01_1V], (instregex "^SST1[HW]_D_[SU]XTW_SCALED$", + "^SST1D_[SU]XTW_SCALED$")>; // Scatter store, 32-bit unscaled offset -def : InstRW<[N2Write_4cyc_2L01_2V], (instregex "^SST1[BH]_S_[SU]XTW$", - "^SST1W_[SU]XTW$")>; +def : InstRW<[N2Write_4c_2L01_2V], (instregex "^SST1[BH]_S_[SU]XTW$", + "^SST1W_[SU]XTW$")>; // Scatter store, 64-bit scaled offset -def : InstRW<[N2Write_2cyc_1L01_1V], (instregex "^SST1[HW]_D_SCALED$", - "^SST1D_SCALED$")>; +def : InstRW<[N2Write_2c_1L01_1V], (instregex "^SST1[HW]_D_SCALED$", + "^SST1D_SCALED$")>; // Scatter store, 64-bit unscaled offset -def : InstRW<[N2Write_2cyc_1L01_1V], (instregex "^SST1[BHW]_D$", - "^SST1D$")>; +def : InstRW<[N2Write_2c_1L01_1V], (instregex "^SST1[BHW]_D$", + "^SST1D$")>; // SVE Miscellaneous instructions // ----------------------------------------------------------------------------- // Read first fault register, unpredicated -def : InstRW<[N2Write_2cyc_1M0], (instrs RDFFR_P)>; +def : InstRW<[N2Write_2c_1M0], (instrs RDFFR_P)>; // Read first fault register, predicated -def : InstRW<[N2Write_3cyc_1M0_1M], (instrs RDFFR_PPz)>; +def : InstRW<[N2Write_3c_1M0_1M], (instrs RDFFR_PPz)>; // Read first fault register and set flags -def : InstRW<[N2Write_4cyc_2M0_2M], (instrs RDFFRS_PPz)>; +def : InstRW<[N2Write_4c_2M0_2M], (instrs RDFFRS_PPz)>; // Set first fault register // Write to first fault register -def : InstRW<[N2Write_2cyc_1M0], (instrs SETFFR, WRFFR)>; +def : InstRW<[N2Write_2c_1M0], (instrs SETFFR, WRFFR)>; // Prefetch -def : InstRW<[N2Write_4cyc_1L], (instregex "^PRF[BHWD]")>; +def : InstRW<[N2Write_4c_1L], (instregex "^PRF[BHWD]")>; // SVE Cryptographic instructions // ----------------------------------------------------------------------------- // Crypto AES ops -def : InstRW<[N2Write_2cyc_1V], (instregex "^AES[DE]_ZZZ_B$", - "^AESI?MC_ZZ_B$")>; +def : InstRW<[N2Write_2c_1V], (instregex "^AES[DE]_ZZZ_B$", + "^AESI?MC_ZZ_B$")>; // Crypto SHA3 
-def : InstRW<[N2Write_2cyc_1V0], (instregex "^(BCAX|EOR3)_ZZZZ$",
-    "^RAX1_ZZZ_D$",
-    "^XAR_ZZZI_[BHSD]$")>;
+def : InstRW<[N2Write_2c_1V0], (instregex "^(BCAX|EOR3)_ZZZZ$",
+    "^RAX1_ZZZ_D$",
+    "^XAR_ZZZI_[BHSD]$")>;
// Crypto SM4 ops
-def : InstRW<[N2Write_4cyc_1V0], (instregex "^SM4E(KEY)?_ZZZ_S$")>;
+def : InstRW<[N2Write_4c_1V0], (instregex "^SM4E(KEY)?_ZZZ_S$")>;
}
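The Neoverse V2 model below receives the same mechanical rename as the N2 model above. For orientation: a SchedWriteRes record binds a named write type to a latency and a list of pipeline units, and an anonymous InstRW override attaches that write type to the instructions matched by instregex/instrs. A minimal TableGen sketch of the pattern, using hypothetical names (ExampleUnitV0, ExampleWrite_4c_1V0 and EXAMPLE_OP are illustrative, not defined in either file):

  // One micro-op on the hypothetical V0 pipe; result ready after 4 cycles.
  def ExampleWrite_4c_1V0 : SchedWriteRes<[ExampleUnitV0]> {
    let Latency = 4;
    let NumMicroOps = 1;
  }
  // Attach the write type to every instruction whose name matches the regex.
  def : InstRW<[ExampleWrite_4c_1V0], (instregex "^EXAMPLE_OP$")>;

Only the record names change in this patch ("4cyc" becomes "4c"); latencies, micro-op counts, and unit assignments are untouched, so the generated schedules are identical.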
diff --git a/llvm/lib/Target/AArch64/AArch64SchedNeoverseV2.td b/llvm/lib/Target/AArch64/AArch64SchedNeoverseV2.td
index 66f3914..f884d20 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedNeoverseV2.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedNeoverseV2.td
@@ -91,292 +91,292 @@ def : WriteRes<WriteLDHi, []> { let Latency = 4; }
//===----------------------------------------------------------------------===//
// Define generic 0 micro-op types
-def V2Write_0cyc : SchedWriteRes<[]> { let Latency = 0; }
+def V2Write_0c : SchedWriteRes<[]> { let Latency = 0; }
// Define generic 1 micro-op types
-def V2Write_1cyc_1B : SchedWriteRes<[V2UnitB]> { let Latency = 1; }
-def V2Write_1cyc_1F : SchedWriteRes<[V2UnitF]> { let Latency = 1; }
-def V2Write_1cyc_1I : SchedWriteRes<[V2UnitI]> { let Latency = 1; }
-def V2Write_1cyc_1M : SchedWriteRes<[V2UnitM]> { let Latency = 1; }
-def V2Write_1cyc_1M0 : SchedWriteRes<[V2UnitM0]> { let Latency = 1; }
-def V2Write_1cyc_1L01 : SchedWriteRes<[V2UnitL01]> { let Latency = 1; }
-def V2Write_2cyc_1M : SchedWriteRes<[V2UnitM]> { let Latency = 2; }
-def V2Write_3cyc_1M : SchedWriteRes<[V2UnitM]> { let Latency = 3; }
-def V2Write_2cyc_1M0 : SchedWriteRes<[V2UnitM0]> { let Latency = 2; }
-def V2Write_3cyc_1M0 : SchedWriteRes<[V2UnitM0]> { let Latency = 3; }
-def V2Write_5cyc_1M0 : SchedWriteRes<[V2UnitM0]> { let Latency = 5; }
-def V2Write_12cyc_1M0 : SchedWriteRes<[V2UnitM0]> { let Latency = 12;
-    let ReleaseAtCycles = [12]; }
-def V2Write_20cyc_1M0 : SchedWriteRes<[V2UnitM0]> { let Latency = 20;
-    let ReleaseAtCycles = [20]; }
-def V2Write_4cyc_1L : SchedWriteRes<[V2UnitL]> { let Latency = 4; }
-def V2Write_6cyc_1L : SchedWriteRes<[V2UnitL]> { let Latency = 6; }
-def V2Write_2cyc_1V : SchedWriteRes<[V2UnitV]> { let Latency = 2; }
-def V2Write_2cyc_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 2; }
-def V2Write_2cyc_1V01 : SchedWriteRes<[V2UnitV01]> { let Latency = 2; }
-def V2Write_2cyc_1V23 : SchedWriteRes<[V2UnitV23]> { let Latency = 2; }
-def V2Write_3cyc_1V : SchedWriteRes<[V2UnitV]> { let Latency = 3; }
-def V2Write_3cyc_1V01 : SchedWriteRes<[V2UnitV01]> { let Latency = 3;
-    let ReleaseAtCycles = [2]; }
-def V2Write_3cyc_1V23 : SchedWriteRes<[V2UnitV23]> { let Latency = 3; }
-def V2Write_4cyc_1V : SchedWriteRes<[V2UnitV]> { let Latency = 4; }
-def V2Write_5cyc_1V : SchedWriteRes<[V2UnitV]> { let Latency = 5; }
-def V2Write_6cyc_1V : SchedWriteRes<[V2UnitV]> { let Latency = 6; }
-def V2Write_12cyc_1V : SchedWriteRes<[V2UnitV]> { let Latency = 12; }
-def V2Write_3cyc_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 3; }
-def V2Write_3cyc_1V02 : SchedWriteRes<[V2UnitV02]> { let Latency = 3; }
-def V2Write_4cyc_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 4; }
-def V2Write_4cyc_1V02 : SchedWriteRes<[V2UnitV02]> { let Latency = 4; }
-def V2Write_7cyc_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 7;
-    let ReleaseAtCycles = [7]; }
-def V2Write_7cyc_1V02 : SchedWriteRes<[V2UnitV02]> { let Latency = 7;
-    let ReleaseAtCycles = [2]; }
-def V2Write_9cyc_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 9; }
-def V2Write_9cyc_1V02 : SchedWriteRes<[V2UnitV02]> { let Latency = 9;
-    let ReleaseAtCycles = [2]; }
-def V2Write_10cyc_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 10; }
-def V2Write_10cyc_1V02 : SchedWriteRes<[V2UnitV02]> { let Latency = 10;
-    let ReleaseAtCycles = [2]; }
-def V2Write_12cyc_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 12;
-    let ReleaseAtCycles = [11]; }
-def V2Write_13cyc_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 13; }
-def V2Write_15cyc_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 15; }
-def V2Write_15cyc_1V02 : SchedWriteRes<[V2UnitV02]> { let Latency = 15;
-    let ReleaseAtCycles = [8]; }
-def V2Write_16cyc_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 16; }
-def V2Write_16cyc_1V02 : SchedWriteRes<[V2UnitV02]> { let Latency = 16;
-    let ReleaseAtCycles = [8]; }
-def V2Write_20cyc_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 20;
-    let ReleaseAtCycles = [20]; }
-def V2Write_2cyc_1V1 : SchedWriteRes<[V2UnitV1]> { let Latency = 2; }
-def V2Write_2cyc_1V13 : SchedWriteRes<[V2UnitV13]> { let Latency = 2; }
-def V2Write_3cyc_1V1 : SchedWriteRes<[V2UnitV1]> { let Latency = 3; }
-def V2Write_4cyc_1V1 : SchedWriteRes<[V2UnitV1]> { let Latency = 4; }
-def V2Write_4cyc_1V13 : SchedWriteRes<[V2UnitV13]> { let Latency = 4; }
-def V2Write_6cyc_1V1 : SchedWriteRes<[V2UnitV1]> { let Latency = 6; }
-def V2Write_10cyc_1V1 : SchedWriteRes<[V2UnitV1]> { let Latency = 10; }
-def V2Write_6cyc_1L01 : SchedWriteRes<[V2UnitL01]> { let Latency = 6; }
+def V2Write_1c_1B : SchedWriteRes<[V2UnitB]> { let Latency = 1; }
+def V2Write_1c_1F : SchedWriteRes<[V2UnitF]> { let Latency = 1; }
+def V2Write_1c_1I : SchedWriteRes<[V2UnitI]> { let Latency = 1; }
+def V2Write_1c_1M : SchedWriteRes<[V2UnitM]> { let Latency = 1; }
+def V2Write_1c_1M0 : SchedWriteRes<[V2UnitM0]> { let Latency = 1; }
+def V2Write_1c_1L01 : SchedWriteRes<[V2UnitL01]> { let Latency = 1; }
+def V2Write_2c_1M : SchedWriteRes<[V2UnitM]> { let Latency = 2; }
+def V2Write_3c_1M : SchedWriteRes<[V2UnitM]> { let Latency = 3; }
+def V2Write_2c_1M0 : SchedWriteRes<[V2UnitM0]> { let Latency = 2; }
+def V2Write_3c_1M0 : SchedWriteRes<[V2UnitM0]> { let Latency = 3; }
+def V2Write_5c_1M0 : SchedWriteRes<[V2UnitM0]> { let Latency = 5; }
+def V2Write_12c_1M0 : SchedWriteRes<[V2UnitM0]> { let Latency = 12;
+    let ReleaseAtCycles = [12]; }
+def V2Write_20c_1M0 : SchedWriteRes<[V2UnitM0]> { let Latency = 20;
+    let ReleaseAtCycles = [20]; }
+def V2Write_4c_1L : SchedWriteRes<[V2UnitL]> { let Latency = 4; }
+def V2Write_6c_1L : SchedWriteRes<[V2UnitL]> { let Latency = 6; }
+def V2Write_2c_1V : SchedWriteRes<[V2UnitV]> { let Latency = 2; }
+def V2Write_2c_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 2; }
+def V2Write_2c_1V01 : SchedWriteRes<[V2UnitV01]> { let Latency = 2; }
+def V2Write_2c_1V23 : SchedWriteRes<[V2UnitV23]> { let Latency = 2; }
+def V2Write_3c_1V : SchedWriteRes<[V2UnitV]> { let Latency = 3; }
+def V2Write_3c_1V01 : SchedWriteRes<[V2UnitV01]> { let Latency = 3;
+    let ReleaseAtCycles = [2]; }
+def V2Write_3c_1V23 : SchedWriteRes<[V2UnitV23]> { let Latency = 3; }
+def V2Write_4c_1V : SchedWriteRes<[V2UnitV]> { let Latency = 4; }
+def V2Write_5c_1V : SchedWriteRes<[V2UnitV]> { let Latency = 5; }
+def V2Write_6c_1V : SchedWriteRes<[V2UnitV]> { let Latency = 6; }
+def V2Write_12c_1V : SchedWriteRes<[V2UnitV]> { let Latency = 12; }
+def V2Write_3c_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 3; }
+def V2Write_3c_1V02 : SchedWriteRes<[V2UnitV02]> { let Latency = 3; }
+def V2Write_4c_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 4; }
+def V2Write_4c_1V02 : SchedWriteRes<[V2UnitV02]> { let Latency = 4; }
+def V2Write_7c_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 7;
+    let ReleaseAtCycles = [7]; }
+def V2Write_7c_1V02 : SchedWriteRes<[V2UnitV02]> { let Latency = 7;
+    let ReleaseAtCycles = [2]; }
+def V2Write_9c_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 9; }
+def V2Write_9c_1V02 : SchedWriteRes<[V2UnitV02]> { let Latency = 9;
+    let ReleaseAtCycles = [2]; }
+def V2Write_10c_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 10; }
+def V2Write_10c_1V02 : SchedWriteRes<[V2UnitV02]> { let Latency = 10;
+    let ReleaseAtCycles = [2]; }
+def V2Write_12c_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 12;
+    let ReleaseAtCycles = [11]; }
+def V2Write_13c_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 13; }
+def V2Write_15c_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 15; }
+def V2Write_15c_1V02 : SchedWriteRes<[V2UnitV02]> { let Latency = 15;
+    let ReleaseAtCycles = [8]; }
+def V2Write_16c_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 16; }
+def V2Write_16c_1V02 : SchedWriteRes<[V2UnitV02]> { let Latency = 16;
+    let ReleaseAtCycles = [8]; }
+def V2Write_20c_1V0 : SchedWriteRes<[V2UnitV0]> { let Latency = 20;
+    let ReleaseAtCycles = [20]; }
+def V2Write_2c_1V1 : SchedWriteRes<[V2UnitV1]> { let Latency = 2; }
+def V2Write_2c_1V13 : SchedWriteRes<[V2UnitV13]> { let Latency = 2; }
+def V2Write_3c_1V1 : SchedWriteRes<[V2UnitV1]> { let Latency = 3; }
+def V2Write_4c_1V1 : SchedWriteRes<[V2UnitV1]> { let Latency = 4; }
+def V2Write_4c_1V13 : SchedWriteRes<[V2UnitV13]> { let Latency = 4; }
+def V2Write_6c_1V1 : SchedWriteRes<[V2UnitV1]> { let Latency = 6; }
+def V2Write_10c_1V1 : SchedWriteRes<[V2UnitV1]> { let Latency = 10; }
+def V2Write_6c_1L01 : SchedWriteRes<[V2UnitL01]> { let Latency = 6; }

//===----------------------------------------------------------------------===//
// Define generic 2 micro-op types
-def V2Write_1cyc_1B_1R : SchedWriteRes<[V2UnitB, V2UnitR]> {
+def V2Write_1c_1B_1R : SchedWriteRes<[V2UnitB, V2UnitR]> {
  let Latency = 1;
  let NumMicroOps = 2;
}
-def V2Write_6cyc_1M0_1B : SchedWriteRes<[V2UnitM0, V2UnitB]> {
+def V2Write_6c_1M0_1B : SchedWriteRes<[V2UnitM0, V2UnitB]> {
  let Latency = 6;
  let NumMicroOps = 2;
}
-def V2Write_9cyc_1M0_1L : SchedWriteRes<[V2UnitM0, V2UnitL]> {
+def V2Write_9c_1M0_1L : SchedWriteRes<[V2UnitM0, V2UnitL]> {
  let Latency = 9;
  let NumMicroOps = 2;
}
-def V2Write_3cyc_1I_1M : SchedWriteRes<[V2UnitI, V2UnitM]> {
+def V2Write_3c_1I_1M : SchedWriteRes<[V2UnitI, V2UnitM]> {
  let Latency = 3;
  let NumMicroOps = 2;
}
-def V2Write_1cyc_2M : SchedWriteRes<[V2UnitM, V2UnitM]> {
+def V2Write_1c_2M : SchedWriteRes<[V2UnitM, V2UnitM]> {
  let Latency = 1;
  let NumMicroOps = 2;
}
-def V2Write_3cyc_2M : SchedWriteRes<[V2UnitM, V2UnitM]> {
+def V2Write_3c_2M : SchedWriteRes<[V2UnitM, V2UnitM]> {
  let Latency = 3;
  let NumMicroOps = 2;
}
-def V2Write_4cyc_2M : SchedWriteRes<[V2UnitM, V2UnitM]> {
+def V2Write_4c_2M : SchedWriteRes<[V2UnitM, V2UnitM]> {
  let Latency = 4;
  let NumMicroOps = 2;
}
-def V2Write_5cyc_1L_1F : SchedWriteRes<[V2UnitL, V2UnitF]> {
+def V2Write_5c_1L_1F : SchedWriteRes<[V2UnitL, V2UnitF]> {
  let Latency = 5;
  let NumMicroOps = 2;
}
-def V2Write_6cyc_1I_1L : SchedWriteRes<[V2UnitI, V2UnitL]> {
+def V2Write_6c_1I_1L : SchedWriteRes<[V2UnitI, V2UnitL]> {
  let Latency = 6;
  let NumMicroOps = 2;
}
-def V2Write_7cyc_1F_1L : SchedWriteRes<[V2UnitF, V2UnitL]> {
+def V2Write_7c_1F_1L : SchedWriteRes<[V2UnitF, V2UnitL]> {
  let Latency = 7;
  let NumMicroOps = 2;
}
-def V2Write_7cyc_1I_1L : SchedWriteRes<[V2UnitI, V2UnitL]> {
+def V2Write_7c_1I_1L : SchedWriteRes<[V2UnitI, V2UnitL]> {
  let Latency = 7;
  let NumMicroOps = 2;
}
-def V2Write_1cyc_1L01_1D : SchedWriteRes<[V2UnitL01, V2UnitD]> {
+def V2Write_1c_1L01_1D : SchedWriteRes<[V2UnitL01, V2UnitD]> {
  let Latency = 1;
  let NumMicroOps = 2;
}
-def V2Write_5cyc_1M0_1V : SchedWriteRes<[V2UnitM0, V2UnitV]> {
+def V2Write_5c_1M0_1V : SchedWriteRes<[V2UnitM0, V2UnitV]> {
  let Latency = 5;
  let NumMicroOps = 2;
}
-def V2Write_2cyc_1L01_1V01 : SchedWriteRes<[V2UnitL01, V2UnitV01]> {
+def V2Write_2c_1L01_1V01 : SchedWriteRes<[V2UnitL01, V2UnitV01]> {
  let Latency = 2;
  let NumMicroOps = 2;
}
-def V2Write_2cyc_1L01_1V : SchedWriteRes<[V2UnitL01, V2UnitV]> {
+def V2Write_2c_1L01_1V : SchedWriteRes<[V2UnitL01, V2UnitV]> {
  let Latency = 2;
  let NumMicroOps = 2;
}
-def V2Write_2cyc_2V01 : SchedWriteRes<[V2UnitV01, V2UnitV01]> {
+def V2Write_2c_2V01 : SchedWriteRes<[V2UnitV01, V2UnitV01]> {
  let Latency = 2;
  let NumMicroOps = 2;
}
-def V2Write_4cyc_2V01 : SchedWriteRes<[V2UnitV01, V2UnitV01]> {
+def V2Write_4c_2V01 : SchedWriteRes<[V2UnitV01, V2UnitV01]> {
  let Latency = 4;
  let NumMicroOps = 2;
}
-def V2Write_4cyc_1L01_1V01 : SchedWriteRes<[V2UnitL01, V2UnitV01]> {
+def V2Write_4c_1L01_1V01 : SchedWriteRes<[V2UnitL01, V2UnitV01]> {
  let Latency = 4;
  let NumMicroOps = 2;
}
-def V2Write_4cyc_1V13_1V : SchedWriteRes<[V2UnitV13, V2UnitV]> {
+def V2Write_4c_1V13_1V : SchedWriteRes<[V2UnitV13, V2UnitV]> {
  let Latency = 4;
  let NumMicroOps = 2;
}
-def V2Write_4cyc_2V0 : SchedWriteRes<[V2UnitV0, V2UnitV0]> {
+def V2Write_4c_2V0 : SchedWriteRes<[V2UnitV0, V2UnitV0]> {
  let Latency = 4;
  let NumMicroOps = 2;
}
-def V2Write_4cyc_2V02 : SchedWriteRes<[V2UnitV02, V2UnitV02]> {
+def V2Write_4c_2V02 : SchedWriteRes<[V2UnitV02, V2UnitV02]> {
  let Latency = 4;
  let NumMicroOps = 2;
}
-def V2Write_4cyc_2V : SchedWriteRes<[V2UnitV, V2UnitV]> {
+def V2Write_4c_2V : SchedWriteRes<[V2UnitV, V2UnitV]> {
  let Latency = 4;
  let NumMicroOps = 2;
}
-def V2Write_6cyc_2V : SchedWriteRes<[V2UnitV, V2UnitV]> {
+def V2Write_6c_2V : SchedWriteRes<[V2UnitV, V2UnitV]> {
  let Latency = 6;
  let NumMicroOps = 2;
}
-def V2Write_6cyc_2L : SchedWriteRes<[V2UnitL, V2UnitL]> {
+def V2Write_6c_2L : SchedWriteRes<[V2UnitL, V2UnitL]> {
  let Latency = 6;
  let NumMicroOps = 2;
}
-def V2Write_8cyc_1L_1V : SchedWriteRes<[V2UnitL, V2UnitV]> {
+def V2Write_8c_1L_1V : SchedWriteRes<[V2UnitL, V2UnitV]> {
  let Latency = 8;
  let NumMicroOps = 2;
}
-def V2Write_4cyc_1L01_1V : SchedWriteRes<[V2UnitL01, V2UnitV]> {
+def V2Write_4c_1L01_1V : SchedWriteRes<[V2UnitL01, V2UnitV]> {
  let Latency = 4;
  let NumMicroOps = 2;
}
-def V2Write_3cyc_1M0_1M : SchedWriteRes<[V2UnitM0, V2UnitM]> {
+def V2Write_3c_1M0_1M : SchedWriteRes<[V2UnitM0, V2UnitM]> {
  let Latency = 3;
  let NumMicroOps = 2;
}
-def V2Write_4cyc_1M0_1M : SchedWriteRes<[V2UnitM0, V2UnitM]> {
+def V2Write_4c_1M0_1M : SchedWriteRes<[V2UnitM0, V2UnitM]> {
  let Latency = 4;
  let NumMicroOps = 2;
}
-def V2Write_1cyc_1M0_1M : SchedWriteRes<[V2UnitM0, V2UnitM]> {
+def V2Write_1c_1M0_1M : SchedWriteRes<[V2UnitM0, V2UnitM]> {
  let Latency = 1;
  let NumMicroOps = 2;
}
-def V2Write_2cyc_1M0_1M : SchedWriteRes<[V2UnitM0, V2UnitM]> {
+def V2Write_2c_1M0_1M : SchedWriteRes<[V2UnitM0, V2UnitM]> {
  let Latency = 2;
  let NumMicroOps = 2;
}
-def V2Write_6cyc_2V1 : SchedWriteRes<[V2UnitV1, V2UnitV1]> {
+def V2Write_6c_2V1 : SchedWriteRes<[V2UnitV1, V2UnitV1]> {
  let Latency = 6;
  let NumMicroOps = 2;
}
-def V2Write_4cyc_1V0_1M0 : SchedWriteRes<[V2UnitV0, V2UnitM0]> {
+def V2Write_4c_1V0_1M0 : SchedWriteRes<[V2UnitV0, V2UnitM0]> {
  let Latency = 4;
  let NumMicroOps = 2;
}
-def V2Write_5cyc_1V0_1M0 : SchedWriteRes<[V2UnitV0, V2UnitM0]> {
+def V2Write_5c_1V0_1M0 : SchedWriteRes<[V2UnitV0, V2UnitM0]> {
  let Latency = 5;
  let NumMicroOps = 2;
}
-def V2Write_5cyc_2V0 : SchedWriteRes<[V2UnitV0, V2UnitV0]> {
+def V2Write_5c_2V0 : SchedWriteRes<[V2UnitV0, V2UnitV0]> {
  let Latency = 5;
  let NumMicroOps = 2;
}
-def V2Write_5cyc_2V02 : SchedWriteRes<[V2UnitV02, V2UnitV02]> {
+def V2Write_5c_2V02 : SchedWriteRes<[V2UnitV02, V2UnitV02]> {
  let Latency = 5;
  let NumMicroOps = 2;
}
-def V2Write_6cyc_1V1_1M0 : SchedWriteRes<[V2UnitV1, V2UnitM0]> {
+def V2Write_6c_1V1_1M0 : SchedWriteRes<[V2UnitV1, V2UnitM0]> {
  let Latency = 6;
  let NumMicroOps = 2;
}
-def V2Write_7cyc_1M0_1V02 : SchedWriteRes<[V2UnitM0, V2UnitV02]> {
+def V2Write_7c_1M0_1V02 : SchedWriteRes<[V2UnitM0, V2UnitV02]> {
  let Latency = 7;
  let NumMicroOps = 2;
}
-def V2Write_2cyc_1V0_1M : SchedWriteRes<[V2UnitV0, V2UnitM]> {
+def V2Write_2c_1V0_1M : SchedWriteRes<[V2UnitV0, V2UnitM]> {
  let Latency = 2;
  let NumMicroOps = 2;
}
-def V2Write_3cyc_1V0_1M : SchedWriteRes<[V2UnitV0, V2UnitM]> {
+def V2Write_3c_1V0_1M : SchedWriteRes<[V2UnitV0, V2UnitM]> {
  let Latency = 3;
  let NumMicroOps = 2;
}
-def V2Write_6cyc_1V_1V13 : SchedWriteRes<[V2UnitV, V2UnitV13]> {
+def V2Write_6c_1V_1V13 : SchedWriteRes<[V2UnitV, V2UnitV13]> {
  let Latency = 6;
  let NumMicroOps = 2;
}
-def V2Write_6cyc_1L_1M : SchedWriteRes<[V2UnitL, V2UnitM]> {
+def V2Write_6c_1L_1M : SchedWriteRes<[V2UnitL, V2UnitM]> {
  let Latency = 6;
  let NumMicroOps = 2;
}
-def V2Write_6cyc_1L_1S : SchedWriteRes<[V2UnitL, V2UnitS]> {
+def V2Write_6c_1L_1S : SchedWriteRes<[V2UnitL, V2UnitS]> {
  let Latency = 6;
  let NumMicroOps = 2;
}
-def V2Write_4cyc_2V13 : SchedWriteRes<[V2UnitV13, V2UnitV13]> {
+def V2Write_4c_2V13 : SchedWriteRes<[V2UnitV13, V2UnitV13]> {
  let Latency = 4;
  let NumMicroOps = 2;
}
-def V2Write_8cyc_1M0_1V01 : SchedWriteRes<[V2UnitM0, V2UnitV01]> {
+def V2Write_8c_1M0_1V01 : SchedWriteRes<[V2UnitM0, V2UnitV01]> {
  let Latency = 8;
  let NumMicroOps = 2;
}
@@ -384,62 +384,62 @@ def V2Write_8cyc_1M0_1V01 : SchedWriteRes<[V2UnitM0, V2UnitV01]> {
//===----------------------------------------------------------------------===//
// Define generic 3 micro-op types
-def V2Write_1cyc_1L01_1D_1I : SchedWriteRes<[V2UnitL01, V2UnitD, V2UnitI]> {
+def V2Write_1c_1L01_1D_1I : SchedWriteRes<[V2UnitL01, V2UnitD, V2UnitI]> {
  let Latency = 1;
  let NumMicroOps = 3;
}
-def V2Write_2cyc_1L01_1V01_1I : SchedWriteRes<[V2UnitL01, V2UnitV01, V2UnitI]> {
+def V2Write_2c_1L01_1V01_1I : SchedWriteRes<[V2UnitL01, V2UnitV01, V2UnitI]> {
  let Latency = 2;
  let NumMicroOps = 3;
}
-def V2Write_2cyc_1L01_2V01 : SchedWriteRes<[V2UnitL01, V2UnitV01, V2UnitV01]> {
+def V2Write_2c_1L01_2V01 : SchedWriteRes<[V2UnitL01, V2UnitV01, V2UnitV01]> {
  let Latency = 2;
  let NumMicroOps = 3;
}
-def V2Write_4cyc_1L01_2V01 : SchedWriteRes<[V2UnitL01, V2UnitV01, V2UnitV01]> {
+def V2Write_4c_1L01_2V01 : SchedWriteRes<[V2UnitL01, V2UnitV01, V2UnitV01]> {
  let Latency = 4;
  let NumMicroOps = 3;
}
-def V2Write_9cyc_1L_2V : SchedWriteRes<[V2UnitL, V2UnitV, V2UnitV]> {
+def V2Write_9c_1L_2V : SchedWriteRes<[V2UnitL, V2UnitV, V2UnitV]> {
  let Latency = 9;
  let NumMicroOps = 3;
}
-def V2Write_4cyc_3V01 : SchedWriteRes<[V2UnitV01, V2UnitV01, V2UnitV01]> {
+def V2Write_4c_3V01 : SchedWriteRes<[V2UnitV01, V2UnitV01, V2UnitV01]> {
  let Latency = 4;
  let NumMicroOps = 3;
}
-def V2Write_7cyc_1M_1M0_1V : SchedWriteRes<[V2UnitM, V2UnitM0, V2UnitV]> {
+def V2Write_7c_1M_1M0_1V : SchedWriteRes<[V2UnitM, V2UnitM0, V2UnitV]> {
  let Latency = 7;
  let NumMicroOps = 3;
}
-def V2Write_2cyc_1L01_1S_1V : SchedWriteRes<[V2UnitL01, V2UnitS, V2UnitV]> {
+def V2Write_2c_1L01_1S_1V : SchedWriteRes<[V2UnitL01, V2UnitS, V2UnitV]> {
  let Latency = 2;
  let NumMicroOps = 3;
}
-def V2Write_2cyc_1L01_1S_1V01 : SchedWriteRes<[V2UnitL01, V2UnitS, V2UnitV01]> {
+def V2Write_2c_1L01_1S_1V01 : SchedWriteRes<[V2UnitL01, V2UnitS, V2UnitV01]> {
  let Latency = 2;
  let NumMicroOps = 3;
}
-def V2Write_6cyc_3L : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL]> {
+def V2Write_6c_3L : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL]> {
  let Latency = 6;
  let NumMicroOps = 3;
}
-def V2Write_6cyc_3V : SchedWriteRes<[V2UnitV, V2UnitV, V2UnitV]> {
+def V2Write_6c_3V : SchedWriteRes<[V2UnitV, V2UnitV, V2UnitV]> {
  let Latency = 6;
  let NumMicroOps = 3;
}
-def V2Write_8cyc_1L_2V : SchedWriteRes<[V2UnitL, V2UnitV, V2UnitV]> {
+def V2Write_8c_1L_2V : SchedWriteRes<[V2UnitL, V2UnitV, V2UnitV]> {
  let Latency = 8;
  let NumMicroOps = 3;
}
@@ -447,126 +447,126 @@ def V2Write_8cyc_1L_2V : SchedWriteRes<[V2UnitL, V2UnitV, V2UnitV]> {
//===----------------------------------------------------------------------===//
// Define generic 4 micro-op types
-def V2Write_2cyc_1L01_2V01_1I : SchedWriteRes<[V2UnitL01, V2UnitV01, V2UnitV01,
-    V2UnitI]> {
+def V2Write_2c_1L01_2V01_1I : SchedWriteRes<[V2UnitL01, V2UnitV01, V2UnitV01,
+    V2UnitI]> {
  let Latency = 2;
  let NumMicroOps = 4;
}
-def V2Write_2cyc_2L01_2V01 : SchedWriteRes<[V2UnitL01, V2UnitL01,
-    V2UnitV01, V2UnitV01]> {
+def V2Write_2c_2L01_2V01 : SchedWriteRes<[V2UnitL01, V2UnitL01,
+    V2UnitV01, V2UnitV01]> {
  let Latency = 2;
  let NumMicroOps = 4;
}
-def V2Write_4cyc_2L01_2V01 : SchedWriteRes<[V2UnitL01, V2UnitL01,
-    V2UnitV01, V2UnitV01]> {
+def V2Write_4c_2L01_2V01 : SchedWriteRes<[V2UnitL01, V2UnitL01,
+    V2UnitV01, V2UnitV01]> {
  let Latency = 4;
  let NumMicroOps = 4;
}
-def V2Write_5cyc_1I_3L : SchedWriteRes<[V2UnitI, V2UnitL, V2UnitL, V2UnitL]> {
+def V2Write_5c_1I_3L : SchedWriteRes<[V2UnitI, V2UnitL, V2UnitL, V2UnitL]> {
  let Latency = 5;
  let NumMicroOps = 4;
}
-def V2Write_9cyc_2L_2V1 : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitV1,
-    V2UnitV1]> {
+def V2Write_9c_2L_2V1 : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitV1,
+    V2UnitV1]> {
  let Latency = 9;
  let NumMicroOps = 4;
}
-def V2Write_6cyc_4V0 : SchedWriteRes<[V2UnitV0, V2UnitV0, V2UnitV0, V2UnitV0]> {
+def V2Write_6c_4V0 : SchedWriteRes<[V2UnitV0, V2UnitV0, V2UnitV0, V2UnitV0]> {
  let Latency = 6;
  let NumMicroOps = 4;
}
-def V2Write_8cyc_4V : SchedWriteRes<[V2UnitV, V2UnitV, V2UnitV, V2UnitV]> {
+def V2Write_8c_4V : SchedWriteRes<[V2UnitV, V2UnitV, V2UnitV, V2UnitV]> {
  let Latency = 8;
  let NumMicroOps = 4;
}
-def V2Write_6cyc_2V_2V13 : SchedWriteRes<[V2UnitV, V2UnitV, V2UnitV13,
-    V2UnitV13]> {
+def V2Write_6c_2V_2V13 : SchedWriteRes<[V2UnitV, V2UnitV, V2UnitV13,
+    V2UnitV13]> {
  let Latency = 6;
  let NumMicroOps = 4;
}
-def V2Write_8cyc_2V_2V13 : SchedWriteRes<[V2UnitV, V2UnitV, V2UnitV13,
-    V2UnitV13]> {
+def V2Write_8c_2V_2V13 : SchedWriteRes<[V2UnitV, V2UnitV, V2UnitV13,
+    V2UnitV13]> {
  let Latency = 8;
  let NumMicroOps = 4;
}
-def V2Write_6cyc_4V02 : SchedWriteRes<[V2UnitV02, V2UnitV02, V2UnitV02,
-    V2UnitV02]> {
+def V2Write_6c_4V02 : SchedWriteRes<[V2UnitV02, V2UnitV02, V2UnitV02,
+    V2UnitV02]> {
  let Latency = 6;
  let NumMicroOps = 4;
}
-def V2Write_6cyc_4V : SchedWriteRes<[V2UnitV, V2UnitV, V2UnitV, V2UnitV]> {
+def V2Write_6c_4V : SchedWriteRes<[V2UnitV, V2UnitV, V2UnitV, V2UnitV]> {
  let Latency = 6;
  let NumMicroOps = 4;
}
-def V2Write_8cyc_2L_2V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitV, V2UnitV]> {
+def V2Write_8c_2L_2V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitV, V2UnitV]> {
  let Latency = 8;
  let NumMicroOps = 4;
}
-def V2Write_9cyc_2L_2V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitV, V2UnitV]> {
+def V2Write_9c_2L_2V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitV, V2UnitV]> {
  let Latency = 9;
  let NumMicroOps = 4;
}
-def V2Write_2cyc_2L01_2V : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitV,
-    V2UnitV]> {
+def V2Write_2c_2L01_2V : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitV,
+    V2UnitV]> {
  let Latency = 2;
  let NumMicroOps = 4;
}
-def V2Write_4cyc_2L01_2V : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitV,
-    V2UnitV]> {
+def V2Write_4c_2L01_2V : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitV,
+    V2UnitV]> {
  let Latency = 4;
  let NumMicroOps = 4;
}
-def V2Write_8cyc_2M0_2V02 : SchedWriteRes<[V2UnitM0, V2UnitM0, V2UnitV02,
-    V2UnitV02]> {
+def V2Write_8c_2M0_2V02 : SchedWriteRes<[V2UnitM0, V2UnitM0, V2UnitV02,
+    V2UnitV02]> {
  let Latency = 8;
  let NumMicroOps = 4;
}
-def V2Write_8cyc_2V_2V1 : SchedWriteRes<[V2UnitV, V2UnitV, V2UnitV1,
-    V2UnitV1]> {
+def V2Write_8c_2V_2V1 : SchedWriteRes<[V2UnitV, V2UnitV, V2UnitV1,
+    V2UnitV1]> {
  let Latency = 8;
  let NumMicroOps = 4;
}
-def V2Write_4cyc_2M0_2M : SchedWriteRes<[V2UnitM0, V2UnitM0, V2UnitM,
-    V2UnitM]> {
+def V2Write_4c_2M0_2M : SchedWriteRes<[V2UnitM0, V2UnitM0, V2UnitM,
+    V2UnitM]> {
  let Latency = 4;
  let NumMicroOps = 4;
}
-def V2Write_5cyc_2M0_2M : SchedWriteRes<[V2UnitM0, V2UnitM0, V2UnitM,
-    V2UnitM]> {
+def V2Write_5c_2M0_2M : SchedWriteRes<[V2UnitM0, V2UnitM0, V2UnitM,
+    V2UnitM]> {
  let Latency = 5;
  let NumMicroOps = 4;
}
-def V2Write_6cyc_2I_2L : SchedWriteRes<[V2UnitI, V2UnitI, V2UnitL, V2UnitL]> {
+def V2Write_6c_2I_2L : SchedWriteRes<[V2UnitI, V2UnitI, V2UnitL, V2UnitL]> {
  let Latency = 6;
  let NumMicroOps = 4;
}
-def V2Write_7cyc_4L : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL, V2UnitL]> {
+def V2Write_7c_4L : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL, V2UnitL]> {
  let Latency = 7;
  let NumMicroOps = 4;
}
-def V2Write_6cyc_1L01_3V01 : SchedWriteRes<[V2UnitL01, V2UnitV01, V2UnitV01,
-    V2UnitV01]> {
+def V2Write_6c_1L01_3V01 : SchedWriteRes<[V2UnitL01, V2UnitV01, V2UnitV01,
+    V2UnitV01]> {
  let Latency = 6;
  let NumMicroOps = 4;
}
@@ -574,32 +574,32 @@ def V2Write_6cyc_1L01_3V01 : SchedWriteRes<[V2UnitL01, V2UnitV01, V2UnitV01,
//===----------------------------------------------------------------------===//
// Define generic 5 micro-op types
-def V2Write_2cyc_1L01_2V01_2I : SchedWriteRes<[V2UnitL01, V2UnitV01, V2UnitV01,
-    V2UnitI, V2UnitI]> {
+def V2Write_2c_1L01_2V01_2I : SchedWriteRes<[V2UnitL01, V2UnitV01, V2UnitV01,
+    V2UnitI, V2UnitI]> {
  let Latency = 2;
  let NumMicroOps = 5;
}
-def V2Write_8cyc_2L_3V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitV, V2UnitV,
-    V2UnitV]> {
+def V2Write_8c_2L_3V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitV, V2UnitV,
+    V2UnitV]> {
  let Latency = 8;
  let NumMicroOps = 5;
}
-def V2Write_9cyc_1L_4V : SchedWriteRes<[V2UnitL, V2UnitV, V2UnitV, V2UnitV,
-    V2UnitV]> {
+def V2Write_9c_1L_4V : SchedWriteRes<[V2UnitL, V2UnitV, V2UnitV, V2UnitV,
+    V2UnitV]> {
  let Latency = 9;
  let NumMicroOps = 5;
}
-def V2Write_10cyc_1L_4V : SchedWriteRes<[V2UnitL, V2UnitV, V2UnitV, V2UnitV,
-    V2UnitV]> {
+def V2Write_10c_1L_4V : SchedWriteRes<[V2UnitL, V2UnitV, V2UnitV, V2UnitV,
+    V2UnitV]> {
  let Latency = 10;
  let NumMicroOps = 5;
}
-def V2Write_6cyc_5V : SchedWriteRes<[V2UnitV, V2UnitV, V2UnitV, V2UnitV,
-    V2UnitV]> {
+def V2Write_6c_5V : SchedWriteRes<[V2UnitV, V2UnitV, V2UnitV, V2UnitV,
+    V2UnitV]> {
  let Latency = 6;
  let NumMicroOps = 5;
}
@@ -607,62 +607,62 @@ def V2Write_6cyc_5V : SchedWriteRes<[V2UnitV, V2UnitV, V2UnitV, V2UnitV,
//===----------------------------------------------------------------------===//
// Define generic 6 micro-op types
-def V2Write_8cyc_3L_3V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL,
-    V2UnitV, V2UnitV, V2UnitV]> {
+def V2Write_8c_3L_3V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL,
+    V2UnitV, V2UnitV, V2UnitV]> {
  let Latency = 8;
  let NumMicroOps = 6;
}
-def V2Write_9cyc_3L_3V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL,
-    V2UnitV, V2UnitV, V2UnitV]> {
+def V2Write_9c_3L_3V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL,
+    V2UnitV, V2UnitV, V2UnitV]> {
  let Latency = 9;
  let NumMicroOps = 6;
}
-def V2Write_9cyc_2L_4V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitV,
-    V2UnitV, V2UnitV, V2UnitV]> {
+def V2Write_9c_2L_4V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitV,
+    V2UnitV, V2UnitV, V2UnitV]> {
  let Latency = 9;
  let NumMicroOps = 6;
}
-def V2Write_9cyc_2L_2V_2S : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitV,
-    V2UnitV, V2UnitS, V2UnitS]> {
+def V2Write_9c_2L_2V_2S : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitV,
+    V2UnitV, V2UnitS, V2UnitS]> {
  let Latency = 9;
  let NumMicroOps = 6;
}
-def V2Write_9cyc_2V_4V13 : SchedWriteRes<[V2UnitV, V2UnitV, V2UnitV13,
-    V2UnitV13, V2UnitV13, V2UnitV13]> {
+def V2Write_9c_2V_4V13 : SchedWriteRes<[V2UnitV, V2UnitV, V2UnitV13,
+    V2UnitV13, V2UnitV13, V2UnitV13]> {
  let Latency = 9;
  let NumMicroOps = 6;
}
-def V2Write_2cyc_3L01_3V : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
-    V2UnitV, V2UnitV, V2UnitV]> {
+def V2Write_2c_3L01_3V : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
+    V2UnitV, V2UnitV, V2UnitV]> {
  let Latency = 2;
  let NumMicroOps = 6;
}
-def V2Write_4cyc_2L01_4V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitV01,
-    V2UnitV01, V2UnitV01, V2UnitV01]> {
+def V2Write_4c_2L01_4V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitV01,
+    V2UnitV01, V2UnitV01, V2UnitV01]> {
  let Latency = 4;
  let NumMicroOps = 6;
}
-def V2Write_5cyc_2L01_4V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitV01,
-    V2UnitV01, V2UnitV01, V2UnitV01]> {
+def V2Write_5c_2L01_4V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitV01,
+    V2UnitV01, V2UnitV01, V2UnitV01]> {
  let Latency = 5;
  let NumMicroOps = 6;
}
-def V2Write_2cyc_3L01_3V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
-    V2UnitV01, V2UnitV01, V2UnitV01]> {
+def V2Write_2c_3L01_3V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
+    V2UnitV01, V2UnitV01, V2UnitV01]> {
  let Latency = 2;
  let NumMicroOps = 6;
}
-def V2Write_4cyc_2L01_2S_2V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitS,
-    V2UnitS, V2UnitV01, V2UnitV01]> {
+def V2Write_4c_2L01_2S_2V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitS,
+    V2UnitS, V2UnitV01, V2UnitV01]> {
  let Latency = 4;
  let NumMicroOps = 6;
}
@@ -670,8 +670,8 @@ def V2Write_4cyc_2L01_2S_2V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitS,
//===----------------------------------------------------------------------===//
// Define generic 7 micro-op types
-def V2Write_8cyc_3L_4V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL,
-    V2UnitV, V2UnitV, V2UnitV, V2UnitV]> {
+def V2Write_8c_3L_4V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL,
+    V2UnitV, V2UnitV, V2UnitV, V2UnitV]> {
  let Latency = 8;
  let NumMicroOps = 7;
}
@@ -679,36 +679,36 @@ def V2Write_8cyc_3L_4V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL,
//===----------------------------------------------------------------------===//
// Define generic 8 micro-op types
-def V2Write_2cyc_4L01_4V : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitV, V2UnitV, V2UnitV,
-    V2UnitV]> {
+def V2Write_2c_4L01_4V : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitV, V2UnitV, V2UnitV,
+    V2UnitV]> {
  let Latency = 2;
  let NumMicroOps = 8;
}
-def V2Write_2cyc_4L01_4V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01]> {
+def V2Write_2c_4L01_4V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01]> {
  let Latency = 2;
  let NumMicroOps = 8;
}
-def V2Write_4cyc_4L01_4V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01]> {
+def V2Write_4c_4L01_4V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01]> {
  let Latency = 4;
  let NumMicroOps = 8;
}
-def V2Write_6cyc_2L01_6V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitV01,
-    V2UnitV01, V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01]> {
+def V2Write_6c_2L01_6V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitV01,
+    V2UnitV01, V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01]> {
  let Latency = 6;
  let NumMicroOps = 8;
}
-def V2Write_8cyc_4L_4V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL, V2UnitL,
-    V2UnitV, V2UnitV, V2UnitV, V2UnitV]> {
+def V2Write_8c_4L_4V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL, V2UnitL,
+    V2UnitV, V2UnitV, V2UnitV, V2UnitV]> {
  let Latency = 8;
  let NumMicroOps = 8;
}
@@ -716,23 +716,23 @@ def V2Write_8cyc_4L_4V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL, V2UnitL,
//===----------------------------------------------------------------------===//
// Define generic 9 micro-op types
-def V2Write_6cyc_3L01_6V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
-    V2UnitV01, V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01, V2UnitV01]> {
+def V2Write_6c_3L01_6V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
+    V2UnitV01, V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01, V2UnitV01]> {
  let Latency = 6;
  let NumMicroOps = 9;
}
-def V2Write_10cyc_1L_8V : SchedWriteRes<[V2UnitL, V2UnitV, V2UnitV, V2UnitV,
-    V2UnitV, V2UnitV, V2UnitV, V2UnitV,
-    V2UnitV]> {
+def V2Write_10c_1L_8V : SchedWriteRes<[V2UnitL, V2UnitV, V2UnitV, V2UnitV,
+    V2UnitV, V2UnitV, V2UnitV, V2UnitV,
+    V2UnitV]> {
  let Latency = 10;
  let NumMicroOps = 9;
}
-def V2Write_10cyc_3V_3L_3S : SchedWriteRes<[V2UnitV, V2UnitV, V2UnitV,
-    V2UnitL, V2UnitL, V2UnitL,
-    V2UnitS, V2UnitS, V2UnitS]> {
+def V2Write_10c_3V_3L_3S : SchedWriteRes<[V2UnitV, V2UnitV, V2UnitV,
+    V2UnitL, V2UnitL, V2UnitL,
+    V2UnitS, V2UnitS, V2UnitS]> {
  let Latency = 10;
  let NumMicroOps = 9;
}
@@ -740,9 +740,9 @@ def V2Write_10cyc_3V_3L_3S : SchedWriteRes<[V2UnitV, V2UnitV, V2UnitV,
//===----------------------------------------------------------------------===//
// Define generic 10 micro-op types
-def V2Write_9cyc_6L_4V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL, V2UnitL,
-    V2UnitL, V2UnitL, V2UnitV, V2UnitV,
-    V2UnitV, V2UnitV]> {
+def V2Write_9c_6L_4V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL, V2UnitL,
+    V2UnitL, V2UnitL, V2UnitV, V2UnitV,
+    V2UnitV, V2UnitV]> {
  let Latency = 9;
  let NumMicroOps = 10;
}
@@ -750,26 +750,26 @@ def V2Write_9cyc_6L_4V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL, V2UnitL,
//===----------------------------------------------------------------------===//
// Define generic 12 micro-op types
-def V2Write_5cyc_4L01_8V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01, V2UnitV01]> {
+def V2Write_5c_4L01_8V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01, V2UnitV01]> {
  let Latency = 5;
  let NumMicroOps = 12;
}
-def V2Write_9cyc_4L_8V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL,
-    V2UnitL, V2UnitV, V2UnitV,
-    V2UnitV, V2UnitV, V2UnitV,
-    V2UnitV, V2UnitV, V2UnitV]> {
+def V2Write_9c_4L_8V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL,
+    V2UnitL, V2UnitV, V2UnitV,
+    V2UnitV, V2UnitV, V2UnitV,
+    V2UnitV, V2UnitV, V2UnitV]> {
  let Latency = 9;
  let NumMicroOps = 12;
}
-def V2Write_10cyc_4L_8V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL,
-    V2UnitL, V2UnitV, V2UnitV,
-    V2UnitV, V2UnitV, V2UnitV,
-    V2UnitV, V2UnitV, V2UnitV]> {
+def V2Write_10c_4L_8V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL,
+    V2UnitL, V2UnitV, V2UnitV,
+    V2UnitV, V2UnitV, V2UnitV,
+    V2UnitV, V2UnitV, V2UnitV]> {
  let Latency = 10;
  let NumMicroOps = 12;
}
@@ -777,22 +777,22 @@ def V2Write_10cyc_4L_8V : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL,
//===----------------------------------------------------------------------===//
// Define generic 16 micro-op types
-def V2Write_7cyc_4L01_12V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01, V2UnitV01,
-    V2UnitV01]> {
+def V2Write_7c_4L01_12V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01, V2UnitV01,
+    V2UnitV01]> {
  let Latency = 7;
  let NumMicroOps = 16;
}
-def V2Write_10cyc_4L_8V_4S : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL,
-    V2UnitL, V2UnitV, V2UnitV,
-    V2UnitV, V2UnitV, V2UnitV,
-    V2UnitV, V2UnitV, V2UnitV,
-    V2UnitS, V2UnitS, V2UnitS,
-    V2UnitS]> {
+def V2Write_10c_4L_8V_4S : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL,
+    V2UnitL, V2UnitV, V2UnitV,
+    V2UnitV, V2UnitV, V2UnitV,
+    V2UnitV, V2UnitV, V2UnitV,
+    V2UnitS, V2UnitS, V2UnitS,
+    V2UnitS]> {
  let Latency = 10;
  let NumMicroOps = 16;
}
@@ -800,12 +800,12 @@ def V2Write_10cyc_4L_8V_4S : SchedWriteRes<[V2UnitL, V2UnitL, V2UnitL,
//===----------------------------------------------------------------------===//
// Define generic 18 micro-op types
-def V2Write_7cyc_9L01_9V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitL01, V2UnitL01,
-    V2UnitV01, V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01, V2UnitV01]> {
+def V2Write_7c_9L01_9V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitL01, V2UnitL01,
+    V2UnitV01, V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01, V2UnitV01]> {
  let Latency = 7;
  let NumMicroOps = 18;
}
@@ -813,16 +813,16 @@ def V2Write_7cyc_9L01_9V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
//===----------------------------------------------------------------------===//
// Define generic 27 micro-op types
-def V2Write_7cyc_9L01_9S_9V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitL01, V2UnitL01,
-    V2UnitS, V2UnitS, V2UnitS,
-    V2UnitS, V2UnitS, V2UnitS,
-    V2UnitS, V2UnitS, V2UnitS,
-    V2UnitV01, V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01,
-    V2UnitV01]> {
+def V2Write_7c_9L01_9S_9V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitL01, V2UnitL01,
+    V2UnitS, V2UnitS, V2UnitS,
+    V2UnitS, V2UnitS, V2UnitS,
+    V2UnitS, V2UnitS, V2UnitS,
+    V2UnitV01, V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01,
+    V2UnitV01]> {
  let Latency = 7;
  let NumMicroOps = 27;
}
@@ -830,19 +830,19 @@ def V2Write_7cyc_9L01_9S_9V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
//===----------------------------------------------------------------------===//
// Define generic 36 micro-op types
-def V2Write_11cyc_18L01_18V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitL01, V2UnitL01,
-    V2UnitV01, V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01,
-    V2UnitV01]> {
+def V2Write_11c_18L01_18V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitL01, V2UnitL01,
+    V2UnitV01, V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01,
+    V2UnitV01]> {
  let Latency = 11;
  let NumMicroOps = 36;
}
@@ -850,30 +850,30 @@ def V2Write_11cyc_18L01_18V01 : SchedWriteRes<[V2UnitL01, V2UnitL01, V2UnitL01,
//===----------------------------------------------------------------------===//
// Define generic 54 micro-op types
-def V2Write_11cyc_18L01_18S_18V01 : SchedWriteRes<[V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitL01,
-    V2UnitL01, V2UnitL01,
-    V2UnitS, V2UnitS, V2UnitS,
-    V2UnitS, V2UnitS, V2UnitS,
-    V2UnitS, V2UnitS, V2UnitS,
-    V2UnitS, V2UnitS, V2UnitS,
-    V2UnitS, V2UnitS, V2UnitS,
-    V2UnitS, V2UnitS, V2UnitS,
-    V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01,
-    V2UnitV01, V2UnitV01]> {
+def V2Write_11c_18L01_18S_18V01 : SchedWriteRes<[V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitL01,
+    V2UnitL01, V2UnitL01,
+    V2UnitS, V2UnitS, V2UnitS,
+    V2UnitS, V2UnitS, V2UnitS,
+    V2UnitS, V2UnitS, V2UnitS,
+    V2UnitS, V2UnitS, V2UnitS,
+    V2UnitS, V2UnitS, V2UnitS,
+    V2UnitS, V2UnitS, V2UnitS,
+    V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01,
+    V2UnitV01, V2UnitV01]> {
  let Latency = 11;
  let NumMicroOps = 54;
}
@@ -882,80 +882,80 @@
// Define predicate-controlled types
def V2Write_ArithI : SchedWriteVariant<[
-    SchedVar<IsCheapLSL, [V2Write_1cyc_1I]>,
-    SchedVar<NoSchedPred, [V2Write_2cyc_1M]>]>;
+    SchedVar<IsCheapLSL, [V2Write_1c_1I]>,
+    SchedVar<NoSchedPred, [V2Write_2c_1M]>]>;
def V2Write_ArithF : SchedWriteVariant<[
-    SchedVar<IsCheapLSL, [V2Write_1cyc_1F]>,
-    SchedVar<NoSchedPred, [V2Write_2cyc_1M]>]>;
+    SchedVar<IsCheapLSL, [V2Write_1c_1F]>,
+    SchedVar<NoSchedPred, [V2Write_2c_1M]>]>;
def V2Write_Logical : SchedWriteVariant<[
SchedWriteVariant<[ - SchedVar<NeoverseNoLSL, [V2Write_1cyc_1F]>, - SchedVar<NoSchedPred, [V2Write_2cyc_1M]>]>; + SchedVar<NeoverseNoLSL, [V2Write_1c_1F]>, + SchedVar<NoSchedPred, [V2Write_2c_1M]>]>; def V2Write_Extr : SchedWriteVariant<[ - SchedVar<IsRORImmIdiomPred, [V2Write_1cyc_1I]>, - SchedVar<NoSchedPred, [V2Write_3cyc_1I_1M]>]>; + SchedVar<IsRORImmIdiomPred, [V2Write_1c_1I]>, + SchedVar<NoSchedPred, [V2Write_3c_1I_1M]>]>; def V2Write_LdrHQ : SchedWriteVariant<[ - SchedVar<NeoverseHQForm, [V2Write_7cyc_1I_1L]>, - SchedVar<NoSchedPred, [V2Write_6cyc_1L]>]>; + SchedVar<NeoverseHQForm, [V2Write_7c_1I_1L]>, + SchedVar<NoSchedPred, [V2Write_6c_1L]>]>; def V2Write_StrHQ : SchedWriteVariant<[ - SchedVar<NeoverseHQForm, [V2Write_2cyc_1L01_1V01_1I]>, - SchedVar<NoSchedPred, [V2Write_2cyc_1L01_1V01]>]>; + SchedVar<NeoverseHQForm, [V2Write_2c_1L01_1V01_1I]>, + SchedVar<NoSchedPred, [V2Write_2c_1L01_1V01]>]>; -def V2Write_0or1cyc_1I : SchedWriteVariant<[ - SchedVar<NeoverseZeroMove, [V2Write_0cyc]>, - SchedVar<NoSchedPred, [V2Write_1cyc_1I]>]>; +def V2Write_0or1c_1I : SchedWriteVariant<[ + SchedVar<NeoverseZeroMove, [V2Write_0c]>, + SchedVar<NoSchedPred, [V2Write_1c_1I]>]>; -def V2Write_0or2cyc_1V : SchedWriteVariant<[ - SchedVar<NeoverseZeroMove, [V2Write_0cyc]>, - SchedVar<NoSchedPred, [V2Write_2cyc_1V]>]>; +def V2Write_0or2c_1V : SchedWriteVariant<[ + SchedVar<NeoverseZeroMove, [V2Write_0c]>, + SchedVar<NoSchedPred, [V2Write_2c_1V]>]>; -def V2Write_0or3cyc_1M0 : SchedWriteVariant<[ - SchedVar<NeoverseZeroMove, [V2Write_0cyc]>, - SchedVar<NoSchedPred, [V2Write_3cyc_1M0]>]>; +def V2Write_0or3c_1M0 : SchedWriteVariant<[ + SchedVar<NeoverseZeroMove, [V2Write_0c]>, + SchedVar<NoSchedPred, [V2Write_3c_1M0]>]>; -def V2Write_2or3cyc_1M : SchedWriteVariant<[ - SchedVar<NeoversePdIsPg, [V2Write_3cyc_1M]>, - SchedVar<NoSchedPred, [V2Write_2cyc_1M]>]>; +def V2Write_2or3c_1M : SchedWriteVariant<[ + SchedVar<NeoversePdIsPg, [V2Write_3c_1M]>, + SchedVar<NoSchedPred, [V2Write_2c_1M]>]>; -def V2Write_3or4cyc_2M : SchedWriteVariant<[ - SchedVar<NeoversePdIsPg, [V2Write_4cyc_2M]>, - SchedVar<NoSchedPred, [V2Write_3cyc_2M]>]>; +def V2Write_3or4c_2M : SchedWriteVariant<[ + SchedVar<NeoversePdIsPg, [V2Write_4c_2M]>, + SchedVar<NoSchedPred, [V2Write_3c_2M]>]>; -def V2Write_1or2cyc_1M0 : SchedWriteVariant<[ - SchedVar<NeoversePdIsPg, [V2Write_2cyc_1M0]>, - SchedVar<NoSchedPred, [V2Write_1cyc_1M0]>]>; +def V2Write_1or2c_1M0 : SchedWriteVariant<[ + SchedVar<NeoversePdIsPg, [V2Write_2c_1M0]>, + SchedVar<NoSchedPred, [V2Write_1c_1M0]>]>; -def V2Write_2or3cyc_1M0 : SchedWriteVariant<[ - SchedVar<NeoversePdIsPg, [V2Write_3cyc_1M0]>, - SchedVar<NoSchedPred, [V2Write_2cyc_1M0]>]>; +def V2Write_2or3c_1M0 : SchedWriteVariant<[ + SchedVar<NeoversePdIsPg, [V2Write_3c_1M0]>, + SchedVar<NoSchedPred, [V2Write_2c_1M0]>]>; -def V2Write_1or2cyc_1M0_1M : SchedWriteVariant<[ - SchedVar<NeoversePdIsPg, [V2Write_2cyc_1M0_1M]>, - SchedVar<NoSchedPred, [V2Write_1cyc_1M0_1M]>]>; +def V2Write_1or2c_1M0_1M : SchedWriteVariant<[ + SchedVar<NeoversePdIsPg, [V2Write_2c_1M0_1M]>, + SchedVar<NoSchedPred, [V2Write_1c_1M0_1M]>]>; -def V2Write_3or4cyc_1M0_1M : SchedWriteVariant<[ - SchedVar<NeoversePdIsPg, [V2Write_4cyc_1M0_1M]>, - SchedVar<NoSchedPred, [V2Write_3cyc_1M0_1M]>]>; +def V2Write_3or4c_1M0_1M : SchedWriteVariant<[ + SchedVar<NeoversePdIsPg, [V2Write_4c_1M0_1M]>, + SchedVar<NoSchedPred, [V2Write_3c_1M0_1M]>]>; -def V2Write_4or5cyc_2M0_2M : SchedWriteVariant<[ - SchedVar<NeoversePdIsPg, [V2Write_5cyc_2M0_2M]>, - SchedVar<NoSchedPred, 
[V2Write_4cyc_2M0_2M]>]>; +def V2Write_4or5c_2M0_2M : SchedWriteVariant<[ + SchedVar<NeoversePdIsPg, [V2Write_5c_2M0_2M]>, + SchedVar<NoSchedPred, [V2Write_4c_2M0_2M]>]>; -def V2Write_4or5cyc_1V0_1M0 : SchedWriteVariant<[ - SchedVar<NeoversePdIsPg, [V2Write_5cyc_1V0_1M0]>, - SchedVar<NoSchedPred, [V2Write_4cyc_1V0_1M0]>]>; +def V2Write_4or5c_1V0_1M0 : SchedWriteVariant<[ + SchedVar<NeoversePdIsPg, [V2Write_5c_1V0_1M0]>, + SchedVar<NoSchedPred, [V2Write_4c_1V0_1M0]>]>; -def V2Write_2or3cyc_1V0_1M : SchedWriteVariant<[ - SchedVar<NeoversePdIsPg, [V2Write_3cyc_1V0_1M]>, - SchedVar<NoSchedPred, [V2Write_2cyc_1V0_1M]>]>; +def V2Write_2or3c_1V0_1M : SchedWriteVariant<[ + SchedVar<NeoversePdIsPg, [V2Write_3c_1V0_1M]>, + SchedVar<NoSchedPred, [V2Write_2c_1V0_1M]>]>; def V2Write_IncDec : SchedWriteVariant<[ - SchedVar<NeoverseCheapIncDec, [V2Write_1cyc_1F]>, - SchedVar<NoSchedPred, [V2Write_2cyc_1M]>]>; + SchedVar<NeoverseCheapIncDec, [V2Write_1c_1F]>, + SchedVar<NoSchedPred, [V2Write_2c_1M]>]>; //===----------------------------------------------------------------------===// // Define forwarded types @@ -1071,17 +1071,17 @@ def V2Rd_ZBFMAL : SchedReadAdvance<3, [V2Wr_ZBFMAL]>; //===----------------------------------------------------------------------===// // Define types with long resource cycles (rc) -def V2Write_6cyc_1V1_5rc : SchedWriteRes<[V2UnitV1]> { let Latency = 6; let ReleaseAtCycles = [ 5]; } -def V2Write_7cyc_1V02_7rc : SchedWriteRes<[V2UnitV02]> { let Latency = 7; let ReleaseAtCycles = [ 7]; } -def V2Write_10cyc_1V02_5rc : SchedWriteRes<[V2UnitV02]> { let Latency = 10; let ReleaseAtCycles = [ 5]; } -def V2Write_10cyc_1V02_9rc : SchedWriteRes<[V2UnitV02]> { let Latency = 10; let ReleaseAtCycles = [ 9]; } -def V2Write_10cyc_1V02_10rc : SchedWriteRes<[V2UnitV02]> { let Latency = 10; let ReleaseAtCycles = [10]; } -def V2Write_10cyc_1V1_9rc : SchedWriteRes<[V2UnitV1]> { let Latency = 10; let ReleaseAtCycles = [ 9]; } -def V2Write_13cyc_1V02_12rc : SchedWriteRes<[V2UnitV02]> { let Latency = 13; let ReleaseAtCycles = [12]; } -def V2Write_13cyc_1V02_13rc : SchedWriteRes<[V2UnitV02]> { let Latency = 13; let ReleaseAtCycles = [13]; } -def V2Write_15cyc_1V02_14rc : SchedWriteRes<[V2UnitV02]> { let Latency = 15; let ReleaseAtCycles = [14]; } -def V2Write_16cyc_1V02_14rc : SchedWriteRes<[V2UnitV02]> { let Latency = 16; let ReleaseAtCycles = [14]; } -def V2Write_16cyc_1V02_15rc : SchedWriteRes<[V2UnitV02]> { let Latency = 16; let ReleaseAtCycles = [15]; } +def V2Write_6c_1V1_5rc : SchedWriteRes<[V2UnitV1]> { let Latency = 6; let ReleaseAtCycles = [ 5]; } +def V2Write_7c_1V02_7rc : SchedWriteRes<[V2UnitV02]> { let Latency = 7; let ReleaseAtCycles = [ 7]; } +def V2Write_10c_1V02_5rc : SchedWriteRes<[V2UnitV02]> { let Latency = 10; let ReleaseAtCycles = [ 5]; } +def V2Write_10c_1V02_9rc : SchedWriteRes<[V2UnitV02]> { let Latency = 10; let ReleaseAtCycles = [ 9]; } +def V2Write_10c_1V02_10rc : SchedWriteRes<[V2UnitV02]> { let Latency = 10; let ReleaseAtCycles = [10]; } +def V2Write_10c_1V1_9rc : SchedWriteRes<[V2UnitV1]> { let Latency = 10; let ReleaseAtCycles = [ 9]; } +def V2Write_13c_1V02_12rc : SchedWriteRes<[V2UnitV02]> { let Latency = 13; let ReleaseAtCycles = [12]; } +def V2Write_13c_1V02_13rc : SchedWriteRes<[V2UnitV02]> { let Latency = 13; let ReleaseAtCycles = [13]; } +def V2Write_15c_1V02_14rc : SchedWriteRes<[V2UnitV02]> { let Latency = 15; let ReleaseAtCycles = [14]; } +def V2Write_16c_1V02_14rc : SchedWriteRes<[V2UnitV02]> { let Latency = 16; let ReleaseAtCycles = [14]; } +def 
V2Write_16c_1V02_15rc : SchedWriteRes<[V2UnitV02]> { let Latency = 16; let ReleaseAtCycles = [15]; } // Miscellaneous // ----------------------------------------------------------------------------- @@ -1093,31 +1093,31 @@ def : InstRW<[WriteI], (instrs COPY)>; // Branch, immed // Compare and branch -def : SchedAlias<WriteBr, V2Write_1cyc_1B>; +def : SchedAlias<WriteBr, V2Write_1c_1B>; // Branch, register -def : SchedAlias<WriteBrReg, V2Write_1cyc_1B>; +def : SchedAlias<WriteBrReg, V2Write_1c_1B>; // Branch and link, immed // Branch and link, register -def : InstRW<[V2Write_1cyc_1B_1R], (instrs BL, BLR)>; +def : InstRW<[V2Write_1c_1B_1R], (instrs BL, BLR)>; // §3.4 Arithmetic and Logical Instructions // ----------------------------------------------------------------------------- // ALU, basic // ALU, basic, flagset -def : SchedAlias<WriteI, V2Write_1cyc_1I>; -def : InstRW<[V2Write_1cyc_1F], (instregex "^(ADD|SUB)S[WX]r[ir]$", +def : SchedAlias<WriteI, V2Write_1c_1I>; +def : InstRW<[V2Write_1c_1F], (instregex "^(ADD|SUB)S[WX]r[ir]$", "^(ADC|SBC)S[WX]r$", "^ANDS[WX]ri$")>; -def : InstRW<[V2Write_0or1cyc_1I], (instregex "^MOVZ[WX]i$")>; +def : InstRW<[V2Write_0or1c_1I], (instregex "^MOVZ[WX]i$")>; // ALU, extend and shift -def : SchedAlias<WriteIEReg, V2Write_2cyc_1M>; +def : SchedAlias<WriteIEReg, V2Write_2c_1M>; // Conditional compare -def : InstRW<[V2Write_1cyc_1F], (instregex "^CCM[NP][WX][ir]")>; +def : InstRW<[V2Write_1c_1F], (instregex "^CCM[NP][WX][ir]")>; // Arithmetic, LSL shift, shift <= 4 // Arithmetic, flagset, LSL shift, shift <= 4 @@ -1127,23 +1127,23 @@ def : InstRW<[V2Write_ArithF], (instregex "^(ADD|SUB)S[WX]rs$")>; // Arithmetic, immediate to logical address tag -def : InstRW<[V2Write_2cyc_1M], (instrs ADDG, SUBG)>; +def : InstRW<[V2Write_2c_1M], (instrs ADDG, SUBG)>; // Convert floating-point condition flags // Flag manipulation instructions def : WriteRes<WriteSys, []> { let Latency = 1; } // Insert Random Tags -def : InstRW<[V2Write_2cyc_1M], (instrs IRG, IRGstack)>; +def : InstRW<[V2Write_2c_1M], (instrs IRG, IRGstack)>; // Insert Tag Mask // Subtract Pointer // Subtract Pointer, flagset -def : InstRW<[V2Write_1cyc_1I], (instrs GMI, SUBP, SUBPS)>; +def : InstRW<[V2Write_1c_1I], (instrs GMI, SUBP, SUBPS)>; // Logical, shift, no flagset -def : InstRW<[V2Write_1cyc_1I], (instregex "^(AND|BIC|EON|EOR|ORN)[WX]rs$")>; -def : InstRW<[V2Write_0or1cyc_1I], (instregex "^ORR[WX]rs$")>; +def : InstRW<[V2Write_1c_1I], (instregex "^(AND|BIC|EON|EOR|ORN)[WX]rs$")>; +def : InstRW<[V2Write_0or1c_1I], (instregex "^ORR[WX]rs$")>; // Logical, shift, flagset def : InstRW<[V2Write_Logical], (instregex "^(AND|BIC)S[WX]rs$")>; @@ -1151,17 +1151,17 @@ def : InstRW<[V2Write_Logical], (instregex "^(AND|BIC)S[WX]rs$")>; // Move and shift instructions // ----------------------------------------------------------------------------- -def : SchedAlias<WriteImm, V2Write_1cyc_1I>; +def : SchedAlias<WriteImm, V2Write_1c_1I>; // §3.5 Divide and multiply instructions // ----------------------------------------------------------------------------- // SDIV, UDIV -def : SchedAlias<WriteID32, V2Write_12cyc_1M0>; -def : SchedAlias<WriteID64, V2Write_20cyc_1M0>; +def : SchedAlias<WriteID32, V2Write_12c_1M0>; +def : SchedAlias<WriteID64, V2Write_20c_1M0>; -def : SchedAlias<WriteIM32, V2Write_2cyc_1M>; -def : SchedAlias<WriteIM64, V2Write_2cyc_1M>; +def : SchedAlias<WriteIM32, V2Write_2c_1M>; +def : SchedAlias<WriteIM64, V2Write_2c_1M>; // Multiply // Multiply accumulate, W-form @@ -1175,7 +1175,7 @@ def : 
InstRW<[V2Wr_IMUL, ReadIM, ReadIM, V2Rd_IMA], (instregex "^(S|U)M(ADD|SUB)Lrrr$")>; // Multiply high -def : InstRW<[V2Write_3cyc_1M], (instrs SMULHrr, UMULHrr)>; +def : InstRW<[V2Write_3c_1M], (instrs SMULHrr, UMULHrr)>; // Pointer Authentication Instructions (v8.3 PAC) // ----------------------------------------------------------------------------- @@ -1185,27 +1185,27 @@ def : InstRW<[V2Write_3cyc_1M], (instrs SMULHrr, UMULHrr)>; // Compute pointer authentication code for data address // Compute pointer authentication code, using generic key // Compute pointer authentication code for instruction address -def : InstRW<[V2Write_5cyc_1M0], (instregex "^AUT", "^PAC")>; +def : InstRW<[V2Write_5c_1M0], (instregex "^AUT", "^PAC")>; // Branch and link, register, with pointer authentication // Branch, register, with pointer authentication // Branch, return, with pointer authentication -def : InstRW<[V2Write_6cyc_1M0_1B], (instrs BLRAA, BLRAAZ, BLRAB, BLRABZ, BRAA, +def : InstRW<[V2Write_6c_1M0_1B], (instrs BLRAA, BLRAAZ, BLRAB, BLRABZ, BRAA, BRAAZ, BRAB, BRABZ, RETAA, RETAB, ERETAA, ERETAB)>; // Load register, with pointer authentication -def : InstRW<[V2Write_9cyc_1M0_1L], (instregex "^LDRA[AB](indexed|writeback)")>; +def : InstRW<[V2Write_9c_1M0_1L], (instregex "^LDRA[AB](indexed|writeback)")>; // Strip pointer authentication code -def : InstRW<[V2Write_2cyc_1M0], (instrs XPACD, XPACI, XPACLRI)>; +def : InstRW<[V2Write_2c_1M0], (instrs XPACD, XPACI, XPACLRI)>; // Miscellaneous data-processing instructions // ----------------------------------------------------------------------------- // Address generation -def : InstRW<[V2Write_1cyc_1F], (instrs ADR, ADRP)>; +def : InstRW<[V2Write_1c_1F], (instrs ADR, ADRP)>; // Bitfield extract, one reg // Bitfield extract, two regs @@ -1213,27 +1213,27 @@ def : SchedAlias<WriteExtr, V2Write_Extr>; def : InstRW<[V2Write_Extr], (instrs EXTRWrri, EXTRXrri)>; // Bitfield move, basic -def : SchedAlias<WriteIS, V2Write_1cyc_1I>; +def : SchedAlias<WriteIS, V2Write_1c_1I>; // Bitfield move, insert -def : InstRW<[V2Write_2cyc_1M], (instregex "^BFM[WX]ri$")>; +def : InstRW<[V2Write_2c_1M], (instregex "^BFM[WX]ri$")>; // Load instructions // ----------------------------------------------------------------------------- // NOTE: SOG p. 19: Throughput of LDN?P X-form should be 2, but reported as 3. -def : SchedAlias<WriteLD, V2Write_4cyc_1L>; -def : SchedAlias<WriteLDIdx, V2Write_4cyc_1L>; +def : SchedAlias<WriteLD, V2Write_4c_1L>; +def : SchedAlias<WriteLDIdx, V2Write_4c_1L>; // Load register, literal -def : InstRW<[V2Write_5cyc_1L_1F], (instrs LDRWl, LDRXl, LDRSWl, PRFMl)>; +def : InstRW<[V2Write_5c_1L_1F], (instrs LDRWl, LDRXl, LDRSWl, PRFMl)>; // Load pair, signed immed offset, signed words -def : InstRW<[V2Write_5cyc_1I_3L, WriteLDHi], (instrs LDPSWi)>; +def : InstRW<[V2Write_5c_1I_3L, WriteLDHi], (instrs LDPSWi)>; // Load pair, immed post-index or immed pre-index, signed words -def : InstRW<[WriteAdr, V2Write_5cyc_1I_3L, WriteLDHi], +def : InstRW<[WriteAdr, V2Write_5c_1I_3L, WriteLDHi], (instregex "^LDPSW(post|pre)$")>; // Store instructions @@ -1241,17 +1241,17 @@ def : InstRW<[WriteAdr, V2Write_5cyc_1I_3L, WriteLDHi], // NOTE: SOG, p. 20: Unsure if STRH uses pipeline I. 
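// (Editorial sketch, not part of this patch.) The identifiers renamed
// throughout this hunk encode latency and per-pipeline micro-op counts:
// "1c" is a 1-cycle latency, and "1L01_1D" means one micro-op on the L01
// pipelines plus one on the D pipeline. Assuming the V2UnitL01 and V2UnitD
// resources defined earlier in this file, the store write type aliased
// just below is expected to expand along the lines of:
//
//   def V2Write_1c_1L01_1D : SchedWriteRes<[V2UnitL01, V2UnitD]> {
//     let Latency     = 1; // "1c": result ready after one cycle
//     let NumMicroOps = 2; // one micro-op per listed unit
//   }
//
// The rename therefore only shortens "cyc" to "c" in the identifier;
// Latency, NumMicroOps, and unit assignments are untouched.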
-def : SchedAlias<WriteST, V2Write_1cyc_1L01_1D>; -def : SchedAlias<WriteSTIdx, V2Write_1cyc_1L01_1D>; -def : SchedAlias<WriteSTP, V2Write_1cyc_1L01_1D>; -def : SchedAlias<WriteAdr, V2Write_1cyc_1I>; +def : SchedAlias<WriteST, V2Write_1c_1L01_1D>; +def : SchedAlias<WriteSTIdx, V2Write_1c_1L01_1D>; +def : SchedAlias<WriteSTP, V2Write_1c_1L01_1D>; +def : SchedAlias<WriteAdr, V2Write_1c_1I>; // Tag load instructions // ----------------------------------------------------------------------------- // Load allocation tag // Load multiple allocation tags -def : InstRW<[V2Write_4cyc_1L], (instrs LDG, LDGM)>; +def : InstRW<[V2Write_4c_1L], (instrs LDG, LDGM)>; // Tag store instructions // ----------------------------------------------------------------------------- @@ -1262,7 +1262,7 @@ def : InstRW<[V2Write_4cyc_1L], (instrs LDG, LDGM)>; // Store Allocation Tag to one or two granules, zeroing, pre-index // Store allocation tag and reg pair to memory, post-Index // Store allocation tag and reg pair to memory, pre-Index -def : InstRW<[V2Write_1cyc_1L01_1D_1I], (instrs STGPreIndex, STGPostIndex, +def : InstRW<[V2Write_1c_1L01_1D_1I], (instrs STGPreIndex, STGPostIndex, ST2GPreIndex, ST2GPostIndex, STZGPreIndex, STZGPostIndex, STZ2GPreIndex, STZ2GPostIndex, @@ -1272,7 +1272,7 @@ def : InstRW<[V2Write_1cyc_1L01_1D_1I], (instrs STGPreIndex, STGPostIndex, // Store allocation tag to two granules, zeroing, signed offset // Store allocation tag and reg pair to memory, signed offset // Store multiple allocation tags -def : InstRW<[V2Write_1cyc_1L01_1D], (instrs STGi, ST2Gi, STZGi, +def : InstRW<[V2Write_1c_1L01_1D], (instrs STGi, ST2Gi, STZGi, STZ2Gi, STGPi, STGM, STZGM)>; // FP data processing instructions @@ -1283,27 +1283,27 @@ def : InstRW<[V2Write_1cyc_1L01_1D], (instrs STGi, ST2Gi, STZGi, // FP min/max // FP negate // FP select -def : SchedAlias<WriteF, V2Write_2cyc_1V>; +def : SchedAlias<WriteF, V2Write_2c_1V>; // FP compare -def : SchedAlias<WriteFCmp, V2Write_2cyc_1V0>; +def : SchedAlias<WriteFCmp, V2Write_2c_1V0>; // FP divide, square root -def : SchedAlias<WriteFDiv, V2Write_7cyc_1V02>; +def : SchedAlias<WriteFDiv, V2Write_7c_1V02>; // FP divide, H-form -def : InstRW<[V2Write_7cyc_1V02], (instrs FDIVHrr)>; +def : InstRW<[V2Write_7c_1V02], (instrs FDIVHrr)>; // FP divide, S-form -def : InstRW<[V2Write_10cyc_1V02], (instrs FDIVSrr)>; +def : InstRW<[V2Write_10c_1V02], (instrs FDIVSrr)>; // FP divide, D-form -def : InstRW<[V2Write_15cyc_1V02], (instrs FDIVDrr)>; +def : InstRW<[V2Write_15c_1V02], (instrs FDIVDrr)>; // FP square root, H-form -def : InstRW<[V2Write_7cyc_1V02], (instrs FSQRTHr)>; +def : InstRW<[V2Write_7c_1V02], (instrs FSQRTHr)>; // FP square root, S-form -def : InstRW<[V2Write_9cyc_1V02], (instrs FSQRTSr)>; +def : InstRW<[V2Write_9c_1V02], (instrs FSQRTSr)>; // FP square root, D-form -def : InstRW<[V2Write_16cyc_1V02], (instrs FSQRTDr)>; +def : InstRW<[V2Write_16c_1V02], (instrs FSQRTDr)>; // FP multiply def : WriteRes<WriteFMul, [V2UnitV]> { let Latency = 3; } @@ -1313,56 +1313,56 @@ def : InstRW<[V2Wr_FMA, ReadDefault, ReadDefault, V2Rd_FMA], (instregex "^FN?M(ADD|SUB)[HSD]rrr$")>; // FP round to integral -def : InstRW<[V2Write_3cyc_1V02], (instregex "^FRINT[AIMNPXZ][HSD]r$", +def : InstRW<[V2Write_3c_1V02], (instregex "^FRINT[AIMNPXZ][HSD]r$", "^FRINT(32|64)[XZ][SD]r$")>; // FP miscellaneous instructions // ----------------------------------------------------------------------------- // FP convert, from gen to vec reg -def : InstRW<[V2Write_3cyc_1M0], (instregex 
"^[SU]CVTF[SU][WX][HSD]ri$")>; +def : InstRW<[V2Write_3c_1M0], (instregex "^[SU]CVTF[SU][WX][HSD]ri$")>; // FP convert, from vec to gen reg -def : InstRW<[V2Write_3cyc_1V01], +def : InstRW<[V2Write_3c_1V01], (instregex "^FCVT[AMNPZ][SU][SU][WX][HSD]ri?$")>; // FP convert, Javascript from vec to gen reg -def : SchedAlias<WriteFCvt, V2Write_3cyc_1V0>; +def : SchedAlias<WriteFCvt, V2Write_3c_1V0>; // FP convert, from vec to vec reg -def : InstRW<[V2Write_3cyc_1V02], (instrs FCVTSHr, FCVTDHr, FCVTHSr, FCVTDSr, +def : InstRW<[V2Write_3c_1V02], (instrs FCVTSHr, FCVTDHr, FCVTHSr, FCVTDSr, FCVTHDr, FCVTSDr, FCVTXNv1i64)>; // FP move, immed // FP move, register -def : SchedAlias<WriteFImm, V2Write_2cyc_1V>; +def : SchedAlias<WriteFImm, V2Write_2c_1V>; // FP transfer, from gen to low half of vec reg -def : InstRW<[V2Write_0or3cyc_1M0], +def : InstRW<[V2Write_0or3c_1M0], (instrs FMOVWHr, FMOVXHr, FMOVWSr, FMOVXDr)>; // FP transfer, from gen to high half of vec reg -def : InstRW<[V2Write_5cyc_1M0_1V], (instrs FMOVXDHighr)>; +def : InstRW<[V2Write_5c_1M0_1V], (instrs FMOVXDHighr)>; // FP transfer, from vec to gen reg -def : SchedAlias<WriteFCopy, V2Write_2cyc_2V01>; +def : SchedAlias<WriteFCopy, V2Write_2c_2V01>; // FP load instructions // ----------------------------------------------------------------------------- // Load vector reg, literal, S/D/Q forms -def : InstRW<[V2Write_7cyc_1F_1L], (instregex "^LDR[SDQ]l$")>; +def : InstRW<[V2Write_7c_1F_1L], (instregex "^LDR[SDQ]l$")>; // Load vector reg, unscaled immed -def : InstRW<[V2Write_6cyc_1L], (instregex "^LDUR[BHSDQ]i$")>; +def : InstRW<[V2Write_6c_1L], (instregex "^LDUR[BHSDQ]i$")>; // Load vector reg, immed post-index // Load vector reg, immed pre-index -def : InstRW<[WriteAdr, V2Write_6cyc_1I_1L], +def : InstRW<[WriteAdr, V2Write_6c_1I_1L], (instregex "^LDR[BHSDQ](pre|post)$")>; // Load vector reg, unsigned immed -def : InstRW<[V2Write_6cyc_1L], (instregex "^LDR[BHSDQ]ui$")>; +def : InstRW<[V2Write_6c_1L], (instregex "^LDR[BHSDQ]ui$")>; // Load vector reg, register offset, basic // Load vector reg, register offset, scale, S/D-form @@ -1373,19 +1373,19 @@ def : InstRW<[V2Write_6cyc_1L], (instregex "^LDR[BHSDQ]ui$")>; def : InstRW<[V2Write_LdrHQ, ReadAdrBase], (instregex "^LDR[BHSDQ]ro[WX]$")>; // Load vector pair, immed offset, S/D-form -def : InstRW<[V2Write_6cyc_1L, WriteLDHi], (instregex "^LDN?P[SD]i$")>; +def : InstRW<[V2Write_6c_1L, WriteLDHi], (instregex "^LDN?P[SD]i$")>; // Load vector pair, immed offset, Q-form -def : InstRW<[V2Write_6cyc_2L, WriteLDHi], (instrs LDPQi, LDNPQi)>; +def : InstRW<[V2Write_6c_2L, WriteLDHi], (instrs LDPQi, LDNPQi)>; // Load vector pair, immed post-index, S/D-form // Load vector pair, immed pre-index, S/D-form -def : InstRW<[WriteAdr, V2Write_6cyc_1I_1L, WriteLDHi], +def : InstRW<[WriteAdr, V2Write_6c_1I_1L, WriteLDHi], (instregex "^LDP[SD](pre|post)$")>; // Load vector pair, immed post-index, Q-form // Load vector pair, immed pre-index, Q-form -def : InstRW<[WriteAdr, V2Write_6cyc_2I_2L, WriteLDHi], (instrs LDPQpost, +def : InstRW<[WriteAdr, V2Write_6c_2I_2L, WriteLDHi], (instrs LDPQpost, LDPQpre)>; // FP store instructions @@ -1393,18 +1393,18 @@ def : InstRW<[WriteAdr, V2Write_6cyc_2I_2L, WriteLDHi], (instrs LDPQpost, // Store vector reg, unscaled immed, B/H/S/D-form // Store vector reg, unscaled immed, Q-form -def : InstRW<[V2Write_2cyc_1L01_1V01], (instregex "^STUR[BHSDQ]i$")>; +def : InstRW<[V2Write_2c_1L01_1V01], (instregex "^STUR[BHSDQ]i$")>; // Store vector reg, immed post-index, B/H/S/D-form // Store 
vector reg, immed post-index, Q-form // Store vector reg, immed pre-index, B/H/S/D-form // Store vector reg, immed pre-index, Q-form -def : InstRW<[WriteAdr, V2Write_2cyc_1L01_1V01_1I], +def : InstRW<[WriteAdr, V2Write_2c_1L01_1V01_1I], (instregex "^STR[BHSDQ](pre|post)$")>; // Store vector reg, unsigned immed, B/H/S/D-form // Store vector reg, unsigned immed, Q-form -def : InstRW<[V2Write_2cyc_1L01_1V01], (instregex "^STR[BHSDQ]ui$")>; +def : InstRW<[V2Write_2c_1L01_1V01], (instregex "^STR[BHSDQ]ui$")>; // Store vector reg, register offset, basic, B/H/S/D-form // Store vector reg, register offset, basic, Q-form @@ -1421,23 +1421,23 @@ def : InstRW<[V2Write_StrHQ, ReadAdrBase], // Store vector pair, immed offset, S-form // Store vector pair, immed offset, D-form -def : InstRW<[V2Write_2cyc_1L01_1V01], (instregex "^STN?P[SD]i$")>; +def : InstRW<[V2Write_2c_1L01_1V01], (instregex "^STN?P[SD]i$")>; // Store vector pair, immed offset, Q-form -def : InstRW<[V2Write_2cyc_1L01_2V01], (instrs STPQi, STNPQi)>; +def : InstRW<[V2Write_2c_1L01_2V01], (instrs STPQi, STNPQi)>; // Store vector pair, immed post-index, S-form // Store vector pair, immed post-index, D-form // Store vector pair, immed pre-index, S-form // Store vector pair, immed pre-index, D-form -def : InstRW<[WriteAdr, V2Write_2cyc_1L01_1V01_1I], +def : InstRW<[WriteAdr, V2Write_2c_1L01_1V01_1I], (instregex "^STP[SD](pre|post)$")>; // Store vector pair, immed post-index, Q-form -def : InstRW<[V2Write_2cyc_1L01_2V01_1I], (instrs STPQpost)>; +def : InstRW<[V2Write_2c_1L01_2V01_1I], (instrs STPQpost)>; // Store vector pair, immed pre-index, Q-form -def : InstRW<[V2Write_2cyc_1L01_2V01_2I], (instrs STPQpre)>; +def : InstRW<[V2Write_2c_1L01_2V01_2I], (instrs STPQpre)>; // ASIMD integer instructions // ----------------------------------------------------------------------------- @@ -1450,22 +1450,22 @@ def : InstRW<[V2Write_2cyc_1L01_2V01_2I], (instrs STPQpre)>; // ASIMD compare // ASIMD logical // ASIMD max/min, basic and pair-wise -def : SchedAlias<WriteVd, V2Write_2cyc_1V>; -def : SchedAlias<WriteVq, V2Write_2cyc_1V>; +def : SchedAlias<WriteVd, V2Write_2c_1V>; +def : SchedAlias<WriteVq, V2Write_2c_1V>; // ASIMD absolute diff accum // ASIMD absolute diff accum long def : InstRW<[V2Wr_VA, V2Rd_VA], (instregex "^[SU]ABAL?v")>; // ASIMD arith, reduce, 4H/4S -def : InstRW<[V2Write_2cyc_1V13], (instregex "^(ADDV|[SU]ADDLV)v4(i16|i32)v$")>; +def : InstRW<[V2Write_2c_1V13], (instregex "^(ADDV|[SU]ADDLV)v4(i16|i32)v$")>; // ASIMD arith, reduce, 8B/8H -def : InstRW<[V2Write_4cyc_1V13_1V], +def : InstRW<[V2Write_4c_1V13_1V], (instregex "^(ADDV|[SU]ADDLV)v8(i8|i16)v$")>; // ASIMD arith, reduce, 16B -def : InstRW<[V2Write_4cyc_2V13], (instregex "^(ADDV|[SU]ADDLV)v16i8v$")>; +def : InstRW<[V2Write_4c_2V13], (instregex "^(ADDV|[SU]ADDLV)v16i8v$")>; // ASIMD dot product // ASIMD dot product using signed and unsigned integers @@ -1476,18 +1476,18 @@ def : InstRW<[V2Wr_VDOT, V2Rd_VDOT], def : InstRW<[V2Wr_VMMA, V2Rd_VMMA], (instrs SMMLA, UMMLA, USMMLA)>; // ASIMD max/min, reduce, 4H/4S -def : InstRW<[V2Write_2cyc_1V13], (instregex "^[SU](MAX|MIN)Vv4i16v$", - "^[SU](MAX|MIN)Vv4i32v$")>; +def : InstRW<[V2Write_2c_1V13], (instregex "^[SU](MAX|MIN)Vv4i16v$", + "^[SU](MAX|MIN)Vv4i32v$")>; // ASIMD max/min, reduce, 8B/8H -def : InstRW<[V2Write_4cyc_1V13_1V], (instregex "^[SU](MAX|MIN)Vv8i8v$", - "^[SU](MAX|MIN)Vv8i16v$")>; +def : InstRW<[V2Write_4c_1V13_1V], (instregex "^[SU](MAX|MIN)Vv8i8v$", + "^[SU](MAX|MIN)Vv8i16v$")>; // ASIMD max/min, reduce, 16B -def : 
InstRW<[V2Write_4cyc_2V13], (instregex "[SU](MAX|MIN)Vv16i8v$")>; +def : InstRW<[V2Write_4c_2V13], (instregex "[SU](MAX|MIN)Vv16i8v$")>; // ASIMD multiply -def : InstRW<[V2Write_4cyc_1V02], (instregex "^MULv", "^SQ(R)?DMULHv")>; +def : InstRW<[V2Write_4c_1V02], (instregex "^MULv", "^SQ(R)?DMULHv")>; // ASIMD multiply accumulate def : InstRW<[V2Wr_VMA, V2Rd_VMA], (instregex "^MLAv", "^MLSv")>; @@ -1499,14 +1499,14 @@ def : InstRW<[V2Wr_VMAH, V2Rd_VMAH], (instregex "^SQRDMLAHv", "^SQRDMLSHv")>; def : InstRW<[V2Wr_VMAL, V2Rd_VMAL], (instregex "^[SU]MLALv", "^[SU]MLSLv")>; // ASIMD multiply accumulate saturating long -def : InstRW<[V2Write_4cyc_1V02], (instregex "^SQDML[AS]L[iv]")>; +def : InstRW<[V2Write_4c_1V02], (instregex "^SQDML[AS]L[iv]")>; // ASIMD multiply/multiply long (8x8) polynomial, D-form // ASIMD multiply/multiply long (8x8) polynomial, Q-form -def : InstRW<[V2Write_3cyc_1V23], (instregex "^PMULL?(v8i8|v16i8)$")>; +def : InstRW<[V2Write_3c_1V23], (instregex "^PMULL?(v8i8|v16i8)$")>; // ASIMD multiply long -def : InstRW<[V2Write_3cyc_1V02], (instregex "^[SU]MULLv", "^SQDMULL[iv]")>; +def : InstRW<[V2Write_3c_1V02], (instregex "^[SU]MULLv", "^SQDMULL[iv]")>; // ASIMD pairwise add and accumulate long def : InstRW<[V2Wr_VPA, V2Rd_VPA], (instregex "^[SU]ADALPv")>; @@ -1515,25 +1515,25 @@ def : InstRW<[V2Wr_VPA, V2Rd_VPA], (instregex "^[SU]ADALPv")>; def : InstRW<[V2Wr_VSA, V2Rd_VSA], (instregex "^[SU]SRA[dv]", "^[SU]RSRA[dv]")>; // ASIMD shift by immed, basic -def : InstRW<[V2Write_2cyc_1V13], (instregex "^SHL[dv]", "^SHLLv", "^SHRNv", - "^SSHLLv", "^SSHR[dv]", "^USHLLv", - "^USHR[dv]")>; +def : InstRW<[V2Write_2c_1V13], (instregex "^SHL[dv]", "^SHLLv", "^SHRNv", + "^SSHLLv", "^SSHR[dv]", "^USHLLv", + "^USHR[dv]")>; // ASIMD shift by immed and insert, basic -def : InstRW<[V2Write_2cyc_1V13], (instregex "^SLI[dv]", "^SRI[dv]")>; +def : InstRW<[V2Write_2c_1V13], (instregex "^SLI[dv]", "^SRI[dv]")>; // ASIMD shift by immed, complex -def : InstRW<[V2Write_4cyc_1V13], +def : InstRW<[V2Write_4c_1V13], (instregex "^RSHRNv", "^SQRSHRU?N[bhsv]", "^(SQSHLU?|UQSHL)[bhsd]$", "^(SQSHLU?|UQSHL)(v8i8|v16i8|v4i16|v8i16|v2i32|v4i32|v2i64)_shift$", "^SQSHRU?N[bhsv]", "^SRSHR[dv]", "^UQRSHRN[bhsv]", "^UQSHRN[bhsv]", "^URSHR[dv]")>; // ASIMD shift by register, basic -def : InstRW<[V2Write_2cyc_1V13], (instregex "^[SU]SHLv")>; +def : InstRW<[V2Write_2c_1V13], (instregex "^[SU]SHLv")>; // ASIMD shift by register, complex -def : InstRW<[V2Write_4cyc_1V13], +def : InstRW<[V2Write_4c_1V13], (instregex "^[SU]RSHLv", "^[SU]QRSHLv", "^[SU]QSHL(v1i8|v1i16|v1i32|v1i64|v8i8|v16i8|v4i16|v8i16|v2i32|v4i32|v2i64)$")>; @@ -1553,62 +1553,62 @@ def : InstRW<[V2Write_4cyc_1V13], def : InstRW<[V2Wr_VFCMA, V2Rd_VFCMA], (instregex "^FCMLAv")>; // ASIMD FP convert, long (F16 to F32) -def : InstRW<[V2Write_4cyc_2V02], (instregex "^FCVTL(v4|v8)i16")>; +def : InstRW<[V2Write_4c_2V02], (instregex "^FCVTL(v4|v8)i16")>; // ASIMD FP convert, long (F32 to F64) -def : InstRW<[V2Write_3cyc_1V02], (instregex "^FCVTL(v2|v4)i32")>; +def : InstRW<[V2Write_3c_1V02], (instregex "^FCVTL(v2|v4)i32")>; // ASIMD FP convert, narrow (F32 to F16) -def : InstRW<[V2Write_4cyc_2V02], (instregex "^FCVTN(v4|v8)i16")>; +def : InstRW<[V2Write_4c_2V02], (instregex "^FCVTN(v4|v8)i16")>; // ASIMD FP convert, narrow (F64 to F32) -def : InstRW<[V2Write_3cyc_1V02], (instregex "^FCVTN(v2|v4)i32", +def : InstRW<[V2Write_3c_1V02], (instregex "^FCVTN(v2|v4)i32", "^FCVTXN(v2|v4)f32")>; // ASIMD FP convert, other, D-form F32 and Q-form F64 -def : 
InstRW<[V2Write_3cyc_1V02], (instregex "^FCVT[AMNPZ][SU]v2f(32|64)$", - "^FCVT[AMNPZ][SU]v1i64$", - "^FCVTZ[SU]d$", - "^[SU]CVTFv2f(32|64)$", - "^[SU]CVTFv1i64$", - "^[SU]CVTFd$")>; +def : InstRW<[V2Write_3c_1V02], (instregex "^FCVT[AMNPZ][SU]v2f(32|64)$", + "^FCVT[AMNPZ][SU]v1i64$", + "^FCVTZ[SU]d$", + "^[SU]CVTFv2f(32|64)$", + "^[SU]CVTFv1i64$", + "^[SU]CVTFd$")>; // ASIMD FP convert, other, D-form F16 and Q-form F32 -def : InstRW<[V2Write_4cyc_2V02], (instregex "^FCVT[AMNPZ][SU]v4f(16|32)$", - "^FCVT[AMNPZ][SU]v1i32$", - "^FCVTZ[SU]s$", - "^[SU]CVTFv4f(16|32)$", - "^[SU]CVTFv1i32$", - "^[SU]CVTFs$")>; +def : InstRW<[V2Write_4c_2V02], (instregex "^FCVT[AMNPZ][SU]v4f(16|32)$", + "^FCVT[AMNPZ][SU]v1i32$", + "^FCVTZ[SU]s$", + "^[SU]CVTFv4f(16|32)$", + "^[SU]CVTFv1i32$", + "^[SU]CVTFs$")>; // ASIMD FP convert, other, Q-form F16 -def : InstRW<[V2Write_6cyc_4V02], (instregex "^FCVT[AMNPZ][SU]v8f16$", - "^FCVT[AMNPZ][SU]v1f16$", - "^FCVTZ[SU]h$", - "^[SU]CVTFv8f16$", - "^[SU]CVTFv1i16$", - "^[SU]CVTFh$")>; +def : InstRW<[V2Write_6c_4V02], (instregex "^FCVT[AMNPZ][SU]v8f16$", + "^FCVT[AMNPZ][SU]v1f16$", + "^FCVTZ[SU]h$", + "^[SU]CVTFv8f16$", + "^[SU]CVTFv1i16$", + "^[SU]CVTFh$")>; // ASIMD FP divide, D-form, F16 -def : InstRW<[V2Write_7cyc_1V02_7rc], (instrs FDIVv4f16)>; +def : InstRW<[V2Write_7c_1V02_7rc], (instrs FDIVv4f16)>; // ASIMD FP divide, D-form, F32 -def : InstRW<[V2Write_10cyc_1V02_5rc], (instrs FDIVv2f32)>; +def : InstRW<[V2Write_10c_1V02_5rc], (instrs FDIVv2f32)>; // ASIMD FP divide, Q-form, F16 -def : InstRW<[V2Write_13cyc_1V02_13rc], (instrs FDIVv8f16)>; +def : InstRW<[V2Write_13c_1V02_13rc], (instrs FDIVv8f16)>; // ASIMD FP divide, Q-form, F32 -def : InstRW<[V2Write_10cyc_1V02_10rc], (instrs FDIVv4f32)>; +def : InstRW<[V2Write_10c_1V02_10rc], (instrs FDIVv4f32)>; // ASIMD FP divide, Q-form, F64 -def : InstRW<[V2Write_15cyc_1V02_14rc], (instrs FDIVv2f64)>; +def : InstRW<[V2Write_15c_1V02_14rc], (instrs FDIVv2f64)>; // ASIMD FP max/min, reduce, F32 and D-form F16 -def : InstRW<[V2Write_4cyc_2V], (instregex "^(FMAX|FMIN)(NM)?Vv4(i16|i32)v$")>; +def : InstRW<[V2Write_4c_2V], (instregex "^(FMAX|FMIN)(NM)?Vv4(i16|i32)v$")>; // ASIMD FP max/min, reduce, Q-form F16 -def : InstRW<[V2Write_6cyc_3V], (instregex "^(FMAX|FMIN)(NM)?Vv8i16v$")>; +def : InstRW<[V2Write_6c_3V], (instregex "^(FMAX|FMIN)(NM)?Vv8i16v$")>; // ASIMD FP multiply def : InstRW<[V2Wr_VFM], (instregex "^FMULv", "^FMULXv")>; @@ -1620,38 +1620,38 @@ def : InstRW<[V2Wr_VFMA, V2Rd_VFMA], (instregex "^FMLAv", "^FMLSv")>; def : InstRW<[V2Wr_VFMAL, V2Rd_VFMAL], (instregex "^FML[AS]L2?(lane)?v")>; // ASIMD FP round, D-form F32 and Q-form F64 -def : InstRW<[V2Write_3cyc_1V02], +def : InstRW<[V2Write_3c_1V02], (instregex "^FRINT[AIMNPXZ]v2f(32|64)$", "^FRINT(32|64)[XZ]v2f(32|64)$")>; // ASIMD FP round, D-form F16 and Q-form F32 -def : InstRW<[V2Write_4cyc_2V02], +def : InstRW<[V2Write_4c_2V02], (instregex "^FRINT[AIMNPXZ]v4f(16|32)$", "^FRINT(32|64)[XZ]v4f32$")>; // ASIMD FP round, Q-form F16 -def : InstRW<[V2Write_6cyc_4V02], (instregex "^FRINT[AIMNPXZ]v8f16$")>; +def : InstRW<[V2Write_6c_4V02], (instregex "^FRINT[AIMNPXZ]v8f16$")>; // ASIMD FP square root, D-form, F16 -def : InstRW<[V2Write_7cyc_1V02_7rc], (instrs FSQRTv4f16)>; +def : InstRW<[V2Write_7c_1V02_7rc], (instrs FSQRTv4f16)>; // ASIMD FP square root, D-form, F32 -def : InstRW<[V2Write_10cyc_1V02_5rc], (instrs FSQRTv2f32)>; +def : InstRW<[V2Write_10c_1V02_5rc], (instrs FSQRTv2f32)>; // ASIMD FP square root, Q-form, F16 -def : InstRW<[V2Write_13cyc_1V02_13rc], (instrs 
FSQRTv8f16)>; +def : InstRW<[V2Write_13c_1V02_13rc], (instrs FSQRTv8f16)>; // ASIMD FP square root, Q-form, F32 -def : InstRW<[V2Write_10cyc_1V02_9rc], (instrs FSQRTv4f32)>; +def : InstRW<[V2Write_10c_1V02_9rc], (instrs FSQRTv4f32)>; // ASIMD FP square root, Q-form, F64 -def : InstRW<[V2Write_16cyc_1V02_15rc], (instrs FSQRTv2f64)>; +def : InstRW<[V2Write_16c_1V02_15rc], (instrs FSQRTv2f64)>; // ASIMD BFloat16 (BF16) instructions // ----------------------------------------------------------------------------- // ASIMD convert, F32 to BF16 -def : InstRW<[V2Write_4cyc_2V02], (instrs BFCVTN, BFCVTN2)>; +def : InstRW<[V2Write_4c_2V02], (instrs BFCVTN, BFCVTN2)>; // ASIMD dot product def : InstRW<[V2Wr_VBFDOT, V2Rd_VBFDOT], (instrs BFDOTv4bf16, BFDOTv8bf16)>; @@ -1664,7 +1664,7 @@ def : InstRW<[V2Wr_VBFMAL, V2Rd_VBFMAL], (instrs BFMLALB, BFMLALBIdx, BFMLALT, BFMLALTIdx)>; // Scalar convert, F32 to BF16 -def : InstRW<[V2Write_3cyc_1V02], (instrs BFCVT)>; +def : InstRW<[V2Write_3c_1V02], (instrs BFCVT)>; // ASIMD miscellaneous instructions // ----------------------------------------------------------------------------- @@ -1683,317 +1683,317 @@ def : InstRW<[V2Write_3cyc_1V02], (instrs BFCVT)>; // ASIMD transpose // ASIMD unzip/zip // Handled by SchedAlias<WriteV[dq], ...> -def : InstRW<[V2Write_0or2cyc_1V], (instrs MOVID, MOVIv2d_ns)>; +def : InstRW<[V2Write_0or2c_1V], (instrs MOVID, MOVIv2d_ns)>; // ASIMD duplicate, gen reg -def : InstRW<[V2Write_3cyc_1M0], (instregex "^DUPv.+gpr")>; +def : InstRW<[V2Write_3c_1M0], (instregex "^DUPv.+gpr")>; // ASIMD extract narrow, saturating -def : InstRW<[V2Write_4cyc_1V13], (instregex "^[SU]QXTNv", "^SQXTUNv")>; +def : InstRW<[V2Write_4c_1V13], (instregex "^[SU]QXTNv", "^SQXTUNv")>; // ASIMD reciprocal and square root estimate, D-form U32 -def : InstRW<[V2Write_3cyc_1V02], (instrs URECPEv2i32, URSQRTEv2i32)>; +def : InstRW<[V2Write_3c_1V02], (instrs URECPEv2i32, URSQRTEv2i32)>; // ASIMD reciprocal and square root estimate, Q-form U32 -def : InstRW<[V2Write_4cyc_2V02], (instrs URECPEv4i32, URSQRTEv4i32)>; +def : InstRW<[V2Write_4c_2V02], (instrs URECPEv4i32, URSQRTEv4i32)>; // ASIMD reciprocal and square root estimate, D-form F32 and scalar forms -def : InstRW<[V2Write_3cyc_1V02], (instrs FRECPEv1f16, FRECPEv1i32, - FRECPEv1i64, FRECPEv2f32, - FRSQRTEv1f16, FRSQRTEv1i32, - FRSQRTEv1i64, FRSQRTEv2f32)>; +def : InstRW<[V2Write_3c_1V02], (instrs FRECPEv1f16, FRECPEv1i32, + FRECPEv1i64, FRECPEv2f32, + FRSQRTEv1f16, FRSQRTEv1i32, + FRSQRTEv1i64, FRSQRTEv2f32)>; // ASIMD reciprocal and square root estimate, D-form F16 and Q-form F32 -def : InstRW<[V2Write_4cyc_2V02], (instrs FRECPEv4f16, FRECPEv4f32, - FRSQRTEv4f16, FRSQRTEv4f32)>; +def : InstRW<[V2Write_4c_2V02], (instrs FRECPEv4f16, FRECPEv4f32, + FRSQRTEv4f16, FRSQRTEv4f32)>; // ASIMD reciprocal and square root estimate, Q-form F16 -def : InstRW<[V2Write_6cyc_4V02], (instrs FRECPEv8f16, FRSQRTEv8f16)>; +def : InstRW<[V2Write_6c_4V02], (instrs FRECPEv8f16, FRSQRTEv8f16)>; // ASIMD reciprocal exponent -def : InstRW<[V2Write_3cyc_1V02], (instregex "^FRECPXv")>; +def : InstRW<[V2Write_3c_1V02], (instregex "^FRECPXv")>; // ASIMD reciprocal step -def : InstRW<[V2Write_4cyc_1V], (instregex "^FRECPS(32|64|v)", - "^FRSQRTS(32|64|v)")>; +def : InstRW<[V2Write_4c_1V], (instregex "^FRECPS(32|64|v)", + "^FRSQRTS(32|64|v)")>; // ASIMD table lookup, 1 or 2 table regs -def : InstRW<[V2Write_2cyc_1V01], (instrs TBLv8i8One, TBLv16i8One, - TBLv8i8Two, TBLv16i8Two)>; +def : InstRW<[V2Write_2c_1V01], (instrs TBLv8i8One, TBLv16i8One, 
+ TBLv8i8Two, TBLv16i8Two)>; // ASIMD table lookup, 3 table regs -def : InstRW<[V2Write_4cyc_2V01], (instrs TBLv8i8Three, TBLv16i8Three)>; +def : InstRW<[V2Write_4c_2V01], (instrs TBLv8i8Three, TBLv16i8Three)>; // ASIMD table lookup, 4 table regs -def : InstRW<[V2Write_4cyc_3V01], (instrs TBLv8i8Four, TBLv16i8Four)>; +def : InstRW<[V2Write_4c_3V01], (instrs TBLv8i8Four, TBLv16i8Four)>; // ASIMD table lookup extension, 2 table reg -def : InstRW<[V2Write_4cyc_2V], (instrs TBXv8i8Two, TBXv16i8Two)>; +def : InstRW<[V2Write_4c_2V], (instrs TBXv8i8Two, TBXv16i8Two)>; // ASIMD table lookup extension, 3 table reg -def : InstRW<[V2Write_6cyc_3V], (instrs TBXv8i8Three, TBXv16i8Three)>; +def : InstRW<[V2Write_6c_3V], (instrs TBXv8i8Three, TBXv16i8Three)>; // ASIMD table lookup extension, 4 table reg -def : InstRW<[V2Write_6cyc_5V], (instrs TBXv8i8Four, TBXv16i8Four)>; +def : InstRW<[V2Write_6c_5V], (instrs TBXv8i8Four, TBXv16i8Four)>; // ASIMD transfer, element to gen reg -def : InstRW<[V2Write_2cyc_2V01], (instregex "^[SU]MOVv")>; +def : InstRW<[V2Write_2c_2V01], (instregex "^[SU]MOVv")>; // ASIMD transfer, gen reg to element -def : InstRW<[V2Write_5cyc_1M0_1V], (instregex "^INSvi(8|16|32|64)gpr$")>; +def : InstRW<[V2Write_5c_1M0_1V], (instregex "^INSvi(8|16|32|64)gpr$")>; // ASIMD load instructions // ----------------------------------------------------------------------------- // ASIMD load, 1 element, multiple, 1 reg, D-form -def : InstRW<[V2Write_6cyc_1L], (instregex "^LD1Onev(8b|4h|2s|1d)$")>; -def : InstRW<[WriteAdr, V2Write_6cyc_1L], +def : InstRW<[V2Write_6c_1L], (instregex "^LD1Onev(8b|4h|2s|1d)$")>; +def : InstRW<[WriteAdr, V2Write_6c_1L], (instregex "^LD1Onev(8b|4h|2s|1d)_POST$")>; // ASIMD load, 1 element, multiple, 1 reg, Q-form -def : InstRW<[V2Write_6cyc_1L], (instregex "^LD1Onev(16b|8h|4s|2d)$")>; -def : InstRW<[WriteAdr, V2Write_6cyc_1L], +def : InstRW<[V2Write_6c_1L], (instregex "^LD1Onev(16b|8h|4s|2d)$")>; +def : InstRW<[WriteAdr, V2Write_6c_1L], (instregex "^LD1Onev(16b|8h|4s|2d)_POST$")>; // ASIMD load, 1 element, multiple, 2 reg, D-form -def : InstRW<[V2Write_6cyc_2L], (instregex "^LD1Twov(8b|4h|2s|1d)$")>; -def : InstRW<[WriteAdr, V2Write_6cyc_2L], +def : InstRW<[V2Write_6c_2L], (instregex "^LD1Twov(8b|4h|2s|1d)$")>; +def : InstRW<[WriteAdr, V2Write_6c_2L], (instregex "^LD1Twov(8b|4h|2s|1d)_POST$")>; // ASIMD load, 1 element, multiple, 2 reg, Q-form -def : InstRW<[V2Write_6cyc_2L], (instregex "^LD1Twov(16b|8h|4s|2d)$")>; -def : InstRW<[WriteAdr, V2Write_6cyc_2L], +def : InstRW<[V2Write_6c_2L], (instregex "^LD1Twov(16b|8h|4s|2d)$")>; +def : InstRW<[WriteAdr, V2Write_6c_2L], (instregex "^LD1Twov(16b|8h|4s|2d)_POST$")>; // ASIMD load, 1 element, multiple, 3 reg, D-form -def : InstRW<[V2Write_6cyc_3L], (instregex "^LD1Threev(8b|4h|2s|1d)$")>; -def : InstRW<[WriteAdr, V2Write_6cyc_3L], +def : InstRW<[V2Write_6c_3L], (instregex "^LD1Threev(8b|4h|2s|1d)$")>; +def : InstRW<[WriteAdr, V2Write_6c_3L], (instregex "^LD1Threev(8b|4h|2s|1d)_POST$")>; // ASIMD load, 1 element, multiple, 3 reg, Q-form -def : InstRW<[V2Write_6cyc_3L], (instregex "^LD1Threev(16b|8h|4s|2d)$")>; -def : InstRW<[WriteAdr, V2Write_6cyc_3L], +def : InstRW<[V2Write_6c_3L], (instregex "^LD1Threev(16b|8h|4s|2d)$")>; +def : InstRW<[WriteAdr, V2Write_6c_3L], (instregex "^LD1Threev(16b|8h|4s|2d)_POST$")>; // ASIMD load, 1 element, multiple, 4 reg, D-form -def : InstRW<[V2Write_7cyc_4L], (instregex "^LD1Fourv(8b|4h|2s|1d)$")>; -def : InstRW<[WriteAdr, V2Write_7cyc_4L], +def : InstRW<[V2Write_7c_4L], (instregex 
"^LD1Fourv(8b|4h|2s|1d)$")>; +def : InstRW<[WriteAdr, V2Write_7c_4L], (instregex "^LD1Fourv(8b|4h|2s|1d)_POST$")>; // ASIMD load, 1 element, multiple, 4 reg, Q-form -def : InstRW<[V2Write_7cyc_4L], (instregex "^LD1Fourv(16b|8h|4s|2d)$")>; -def : InstRW<[WriteAdr, V2Write_7cyc_4L], +def : InstRW<[V2Write_7c_4L], (instregex "^LD1Fourv(16b|8h|4s|2d)$")>; +def : InstRW<[WriteAdr, V2Write_7c_4L], (instregex "^LD1Fourv(16b|8h|4s|2d)_POST$")>; // ASIMD load, 1 element, one lane, B/H/S // ASIMD load, 1 element, one lane, D -def : InstRW<[V2Write_8cyc_1L_1V], (instregex "LD1i(8|16|32|64)$")>; -def : InstRW<[WriteAdr, V2Write_8cyc_1L_1V], (instregex "LD1i(8|16|32|64)_POST$")>; +def : InstRW<[V2Write_8c_1L_1V], (instregex "LD1i(8|16|32|64)$")>; +def : InstRW<[WriteAdr, V2Write_8c_1L_1V], (instregex "LD1i(8|16|32|64)_POST$")>; // ASIMD load, 1 element, all lanes, D-form, B/H/S // ASIMD load, 1 element, all lanes, D-form, D -def : InstRW<[V2Write_8cyc_1L_1V], (instregex "LD1Rv(8b|4h|2s|1d)$")>; -def : InstRW<[WriteAdr, V2Write_8cyc_1L_1V], (instregex "LD1Rv(8b|4h|2s|1d)_POST$")>; +def : InstRW<[V2Write_8c_1L_1V], (instregex "LD1Rv(8b|4h|2s|1d)$")>; +def : InstRW<[WriteAdr, V2Write_8c_1L_1V], (instregex "LD1Rv(8b|4h|2s|1d)_POST$")>; // ASIMD load, 1 element, all lanes, Q-form -def : InstRW<[V2Write_8cyc_1L_1V], (instregex "LD1Rv(16b|8h|4s|2d)$")>; -def : InstRW<[WriteAdr, V2Write_8cyc_1L_1V], (instregex "LD1Rv(16b|8h|4s|2d)_POST$")>; +def : InstRW<[V2Write_8c_1L_1V], (instregex "LD1Rv(16b|8h|4s|2d)$")>; +def : InstRW<[WriteAdr, V2Write_8c_1L_1V], (instregex "LD1Rv(16b|8h|4s|2d)_POST$")>; // ASIMD load, 2 element, multiple, D-form, B/H/S -def : InstRW<[V2Write_8cyc_1L_2V], (instregex "LD2Twov(8b|4h|2s)$")>; -def : InstRW<[WriteAdr, V2Write_8cyc_1L_2V], (instregex "LD2Twov(8b|4h|2s)_POST$")>; +def : InstRW<[V2Write_8c_1L_2V], (instregex "LD2Twov(8b|4h|2s)$")>; +def : InstRW<[WriteAdr, V2Write_8c_1L_2V], (instregex "LD2Twov(8b|4h|2s)_POST$")>; // ASIMD load, 2 element, multiple, Q-form, B/H/S // ASIMD load, 2 element, multiple, Q-form, D -def : InstRW<[V2Write_8cyc_2L_2V], (instregex "LD2Twov(16b|8h|4s|2d)$")>; -def : InstRW<[WriteAdr, V2Write_8cyc_2L_2V], (instregex "LD2Twov(16b|8h|4s|2d)_POST$")>; +def : InstRW<[V2Write_8c_2L_2V], (instregex "LD2Twov(16b|8h|4s|2d)$")>; +def : InstRW<[WriteAdr, V2Write_8c_2L_2V], (instregex "LD2Twov(16b|8h|4s|2d)_POST$")>; // ASIMD load, 2 element, one lane, B/H // ASIMD load, 2 element, one lane, S // ASIMD load, 2 element, one lane, D -def : InstRW<[V2Write_8cyc_1L_2V], (instregex "LD2i(8|16|32|64)$")>; -def : InstRW<[WriteAdr, V2Write_8cyc_1L_2V], (instregex "LD2i(8|16|32|64)_POST$")>; +def : InstRW<[V2Write_8c_1L_2V], (instregex "LD2i(8|16|32|64)$")>; +def : InstRW<[WriteAdr, V2Write_8c_1L_2V], (instregex "LD2i(8|16|32|64)_POST$")>; // ASIMD load, 2 element, all lanes, D-form, B/H/S // ASIMD load, 2 element, all lanes, D-form, D -def : InstRW<[V2Write_8cyc_1L_2V], (instregex "LD2Rv(8b|4h|2s|1d)$")>; -def : InstRW<[WriteAdr, V2Write_8cyc_1L_2V], (instregex "LD2Rv(8b|4h|2s|1d)_POST$")>; +def : InstRW<[V2Write_8c_1L_2V], (instregex "LD2Rv(8b|4h|2s|1d)$")>; +def : InstRW<[WriteAdr, V2Write_8c_1L_2V], (instregex "LD2Rv(8b|4h|2s|1d)_POST$")>; // ASIMD load, 2 element, all lanes, Q-form -def : InstRW<[V2Write_8cyc_1L_2V], (instregex "LD2Rv(16b|8h|4s|2d)$")>; -def : InstRW<[WriteAdr, V2Write_8cyc_1L_2V], (instregex "LD2Rv(16b|8h|4s|2d)_POST$")>; +def : InstRW<[V2Write_8c_1L_2V], (instregex "LD2Rv(16b|8h|4s|2d)$")>; +def : InstRW<[WriteAdr, V2Write_8c_1L_2V], (instregex 
"LD2Rv(16b|8h|4s|2d)_POST$")>; // ASIMD load, 3 element, multiple, D-form, B/H/S -def : InstRW<[V2Write_8cyc_2L_3V], (instregex "LD3Threev(8b|4h|2s)$")>; -def : InstRW<[WriteAdr, V2Write_8cyc_2L_3V], (instregex "LD3Threev(8b|4h|2s)_POST$")>; +def : InstRW<[V2Write_8c_2L_3V], (instregex "LD3Threev(8b|4h|2s)$")>; +def : InstRW<[WriteAdr, V2Write_8c_2L_3V], (instregex "LD3Threev(8b|4h|2s)_POST$")>; // ASIMD load, 3 element, multiple, Q-form, B/H/S // ASIMD load, 3 element, multiple, Q-form, D -def : InstRW<[V2Write_8cyc_3L_3V], (instregex "LD3Threev(16b|8h|4s|2d)$")>; -def : InstRW<[WriteAdr, V2Write_8cyc_3L_3V], (instregex "LD3Threev(16b|8h|4s|2d)_POST$")>; +def : InstRW<[V2Write_8c_3L_3V], (instregex "LD3Threev(16b|8h|4s|2d)$")>; +def : InstRW<[WriteAdr, V2Write_8c_3L_3V], (instregex "LD3Threev(16b|8h|4s|2d)_POST$")>; // ASIMD load, 3 element, one lane, B/H // ASIMD load, 3 element, one lane, S // ASIMD load, 3 element, one lane, D -def : InstRW<[V2Write_8cyc_2L_3V], (instregex "LD3i(8|16|32|64)$")>; -def : InstRW<[WriteAdr, V2Write_8cyc_2L_3V], (instregex "LD3i(8|16|32|64)_POST$")>; +def : InstRW<[V2Write_8c_2L_3V], (instregex "LD3i(8|16|32|64)$")>; +def : InstRW<[WriteAdr, V2Write_8c_2L_3V], (instregex "LD3i(8|16|32|64)_POST$")>; // ASIMD load, 3 element, all lanes, D-form, B/H/S // ASIMD load, 3 element, all lanes, D-form, D -def : InstRW<[V2Write_8cyc_2L_3V], (instregex "LD3Rv(8b|4h|2s|1d)$")>; -def : InstRW<[WriteAdr, V2Write_8cyc_2L_3V], (instregex "LD3Rv(8b|4h|2s|1d)_POST$")>; +def : InstRW<[V2Write_8c_2L_3V], (instregex "LD3Rv(8b|4h|2s|1d)$")>; +def : InstRW<[WriteAdr, V2Write_8c_2L_3V], (instregex "LD3Rv(8b|4h|2s|1d)_POST$")>; // ASIMD load, 3 element, all lanes, Q-form, B/H/S // ASIMD load, 3 element, all lanes, Q-form, D -def : InstRW<[V2Write_8cyc_3L_3V], (instregex "LD3Rv(16b|8h|4s|2d)$")>; -def : InstRW<[WriteAdr, V2Write_8cyc_3L_3V], (instregex "LD3Rv(16b|8h|4s|2d)_POST$")>; +def : InstRW<[V2Write_8c_3L_3V], (instregex "LD3Rv(16b|8h|4s|2d)$")>; +def : InstRW<[WriteAdr, V2Write_8c_3L_3V], (instregex "LD3Rv(16b|8h|4s|2d)_POST$")>; // ASIMD load, 4 element, multiple, D-form, B/H/S -def : InstRW<[V2Write_8cyc_3L_4V], (instregex "LD4Fourv(8b|4h|2s)$")>; -def : InstRW<[WriteAdr, V2Write_8cyc_3L_4V], (instregex "LD4Fourv(8b|4h|2s)_POST$")>; +def : InstRW<[V2Write_8c_3L_4V], (instregex "LD4Fourv(8b|4h|2s)$")>; +def : InstRW<[WriteAdr, V2Write_8c_3L_4V], (instregex "LD4Fourv(8b|4h|2s)_POST$")>; // ASIMD load, 4 element, multiple, Q-form, B/H/S // ASIMD load, 4 element, multiple, Q-form, D -def : InstRW<[V2Write_9cyc_6L_4V], (instregex "LD4Fourv(16b|8h|4s|2d)$")>; -def : InstRW<[WriteAdr, V2Write_9cyc_6L_4V], (instregex "LD4Fourv(16b|8h|4s|2d)_POST$")>; +def : InstRW<[V2Write_9c_6L_4V], (instregex "LD4Fourv(16b|8h|4s|2d)$")>; +def : InstRW<[WriteAdr, V2Write_9c_6L_4V], (instregex "LD4Fourv(16b|8h|4s|2d)_POST$")>; // ASIMD load, 4 element, one lane, B/H // ASIMD load, 4 element, one lane, S // ASIMD load, 4 element, one lane, D -def : InstRW<[V2Write_8cyc_3L_4V], (instregex "LD4i(8|16|32|64)$")>; -def : InstRW<[WriteAdr, V2Write_8cyc_3L_4V], (instregex "LD4i(8|16|32|64)_POST$")>; +def : InstRW<[V2Write_8c_3L_4V], (instregex "LD4i(8|16|32|64)$")>; +def : InstRW<[WriteAdr, V2Write_8c_3L_4V], (instregex "LD4i(8|16|32|64)_POST$")>; // ASIMD load, 4 element, all lanes, D-form, B/H/S // ASIMD load, 4 element, all lanes, D-form, D -def : InstRW<[V2Write_8cyc_3L_4V], (instregex "LD4Rv(8b|4h|2s|1d)$")>; -def : InstRW<[WriteAdr, V2Write_8cyc_3L_4V], (instregex "LD4Rv(8b|4h|2s|1d)_POST$")>; +def : 
InstRW<[V2Write_8c_3L_4V], (instregex "LD4Rv(8b|4h|2s|1d)$")>; +def : InstRW<[WriteAdr, V2Write_8c_3L_4V], (instregex "LD4Rv(8b|4h|2s|1d)_POST$")>; // ASIMD load, 4 element, all lanes, Q-form, B/H/S // ASIMD load, 4 element, all lanes, Q-form, D -def : InstRW<[V2Write_8cyc_4L_4V], (instregex "LD4Rv(16b|8h|4s|2d)$")>; -def : InstRW<[WriteAdr, V2Write_8cyc_4L_4V], (instregex "LD4Rv(16b|8h|4s|2d)_POST$")>; +def : InstRW<[V2Write_8c_4L_4V], (instregex "LD4Rv(16b|8h|4s|2d)$")>; +def : InstRW<[WriteAdr, V2Write_8c_4L_4V], (instregex "LD4Rv(16b|8h|4s|2d)_POST$")>; // ASIMD store instructions // ----------------------------------------------------------------------------- // ASIMD store, 1 element, multiple, 1 reg, D-form -def : InstRW<[V2Write_2cyc_1L01_1V01], (instregex "ST1Onev(8b|4h|2s|1d)$")>; -def : InstRW<[WriteAdr, V2Write_2cyc_1L01_1V01], (instregex "ST1Onev(8b|4h|2s|1d)_POST$")>; +def : InstRW<[V2Write_2c_1L01_1V01], (instregex "ST1Onev(8b|4h|2s|1d)$")>; +def : InstRW<[WriteAdr, V2Write_2c_1L01_1V01], (instregex "ST1Onev(8b|4h|2s|1d)_POST$")>; // ASIMD store, 1 element, multiple, 1 reg, Q-form -def : InstRW<[V2Write_2cyc_1L01_1V01], (instregex "ST1Onev(16b|8h|4s|2d)$")>; -def : InstRW<[WriteAdr, V2Write_2cyc_1L01_1V01], (instregex "ST1Onev(16b|8h|4s|2d)_POST$")>; +def : InstRW<[V2Write_2c_1L01_1V01], (instregex "ST1Onev(16b|8h|4s|2d)$")>; +def : InstRW<[WriteAdr, V2Write_2c_1L01_1V01], (instregex "ST1Onev(16b|8h|4s|2d)_POST$")>; // ASIMD store, 1 element, multiple, 2 reg, D-form -def : InstRW<[V2Write_2cyc_1L01_1V01], (instregex "ST1Twov(8b|4h|2s|1d)$")>; -def : InstRW<[WriteAdr, V2Write_2cyc_1L01_1V01], (instregex "ST1Twov(8b|4h|2s|1d)_POST$")>; +def : InstRW<[V2Write_2c_1L01_1V01], (instregex "ST1Twov(8b|4h|2s|1d)$")>; +def : InstRW<[WriteAdr, V2Write_2c_1L01_1V01], (instregex "ST1Twov(8b|4h|2s|1d)_POST$")>; // ASIMD store, 1 element, multiple, 2 reg, Q-form -def : InstRW<[V2Write_2cyc_2L01_2V01], (instregex "ST1Twov(16b|8h|4s|2d)$")>; -def : InstRW<[WriteAdr, V2Write_2cyc_2L01_2V01], (instregex "ST1Twov(16b|8h|4s|2d)_POST$")>; +def : InstRW<[V2Write_2c_2L01_2V01], (instregex "ST1Twov(16b|8h|4s|2d)$")>; +def : InstRW<[WriteAdr, V2Write_2c_2L01_2V01], (instregex "ST1Twov(16b|8h|4s|2d)_POST$")>; // ASIMD store, 1 element, multiple, 3 reg, D-form -def : InstRW<[V2Write_2cyc_2L01_2V01], (instregex "ST1Threev(8b|4h|2s|1d)$")>; -def : InstRW<[WriteAdr, V2Write_2cyc_2L01_2V01], (instregex "ST1Threev(8b|4h|2s|1d)_POST$")>; +def : InstRW<[V2Write_2c_2L01_2V01], (instregex "ST1Threev(8b|4h|2s|1d)$")>; +def : InstRW<[WriteAdr, V2Write_2c_2L01_2V01], (instregex "ST1Threev(8b|4h|2s|1d)_POST$")>; // ASIMD store, 1 element, multiple, 3 reg, Q-form -def : InstRW<[V2Write_2cyc_3L01_3V01], (instregex "ST1Threev(16b|8h|4s|2d)$")>; -def : InstRW<[WriteAdr, V2Write_2cyc_3L01_3V01], (instregex "ST1Threev(16b|8h|4s|2d)_POST$")>; +def : InstRW<[V2Write_2c_3L01_3V01], (instregex "ST1Threev(16b|8h|4s|2d)$")>; +def : InstRW<[WriteAdr, V2Write_2c_3L01_3V01], (instregex "ST1Threev(16b|8h|4s|2d)_POST$")>; // ASIMD store, 1 element, multiple, 4 reg, D-form -def : InstRW<[V2Write_2cyc_2L01_2V01], (instregex "ST1Fourv(8b|4h|2s|1d)$")>; -def : InstRW<[WriteAdr, V2Write_2cyc_2L01_2V01], (instregex "ST1Fourv(8b|4h|2s|1d)_POST$")>; +def : InstRW<[V2Write_2c_2L01_2V01], (instregex "ST1Fourv(8b|4h|2s|1d)$")>; +def : InstRW<[WriteAdr, V2Write_2c_2L01_2V01], (instregex "ST1Fourv(8b|4h|2s|1d)_POST$")>; // ASIMD store, 1 element, multiple, 4 reg, Q-form -def : InstRW<[V2Write_2cyc_4L01_4V01], (instregex 
"ST1Fourv(16b|8h|4s|2d)$")>; -def : InstRW<[WriteAdr, V2Write_2cyc_4L01_4V01], (instregex "ST1Fourv(16b|8h|4s|2d)_POST$")>; +def : InstRW<[V2Write_2c_4L01_4V01], (instregex "ST1Fourv(16b|8h|4s|2d)$")>; +def : InstRW<[WriteAdr, V2Write_2c_4L01_4V01], (instregex "ST1Fourv(16b|8h|4s|2d)_POST$")>; // ASIMD store, 1 element, one lane, B/H/S // ASIMD store, 1 element, one lane, D -def : InstRW<[V2Write_4cyc_1L01_2V01], (instregex "ST1i(8|16|32|64)$")>; -def : InstRW<[WriteAdr, V2Write_4cyc_1L01_2V01], (instregex "ST1i(8|16|32|64)_POST$")>; +def : InstRW<[V2Write_4c_1L01_2V01], (instregex "ST1i(8|16|32|64)$")>; +def : InstRW<[WriteAdr, V2Write_4c_1L01_2V01], (instregex "ST1i(8|16|32|64)_POST$")>; // ASIMD store, 2 element, multiple, D-form, B/H/S -def : InstRW<[V2Write_4cyc_1L01_2V01], (instregex "ST2Twov(8b|4h|2s)$")>; -def : InstRW<[WriteAdr, V2Write_4cyc_1L01_2V01], (instregex "ST2Twov(8b|4h|2s)_POST$")>; +def : InstRW<[V2Write_4c_1L01_2V01], (instregex "ST2Twov(8b|4h|2s)$")>; +def : InstRW<[WriteAdr, V2Write_4c_1L01_2V01], (instregex "ST2Twov(8b|4h|2s)_POST$")>; // ASIMD store, 2 element, multiple, Q-form, B/H/S // ASIMD store, 2 element, multiple, Q-form, D -def : InstRW<[V2Write_4cyc_2L01_4V01], (instregex "ST2Twov(16b|8h|4s|2d)$")>; -def : InstRW<[WriteAdr, V2Write_4cyc_2L01_4V01], (instregex "ST2Twov(16b|8h|4s|2d)_POST$")>; +def : InstRW<[V2Write_4c_2L01_4V01], (instregex "ST2Twov(16b|8h|4s|2d)$")>; +def : InstRW<[WriteAdr, V2Write_4c_2L01_4V01], (instregex "ST2Twov(16b|8h|4s|2d)_POST$")>; // ASIMD store, 2 element, one lane, B/H/S // ASIMD store, 2 element, one lane, D -def : InstRW<[V2Write_4cyc_1L01_2V01], (instregex "ST2i(8|16|32|64)$")>; -def : InstRW<[WriteAdr, V2Write_4cyc_1L01_2V01], (instregex "ST2i(8|16|32|64)_POST$")>; +def : InstRW<[V2Write_4c_1L01_2V01], (instregex "ST2i(8|16|32|64)$")>; +def : InstRW<[WriteAdr, V2Write_4c_1L01_2V01], (instregex "ST2i(8|16|32|64)_POST$")>; // ASIMD store, 3 element, multiple, D-form, B/H/S -def : InstRW<[V2Write_5cyc_2L01_4V01], (instregex "ST3Threev(8b|4h|2s)$")>; -def : InstRW<[WriteAdr, V2Write_5cyc_2L01_4V01], (instregex "ST3Threev(8b|4h|2s)_POST$")>; +def : InstRW<[V2Write_5c_2L01_4V01], (instregex "ST3Threev(8b|4h|2s)$")>; +def : InstRW<[WriteAdr, V2Write_5c_2L01_4V01], (instregex "ST3Threev(8b|4h|2s)_POST$")>; // ASIMD store, 3 element, multiple, Q-form, B/H/S // ASIMD store, 3 element, multiple, Q-form, D -def : InstRW<[V2Write_6cyc_3L01_6V01], (instregex "ST3Threev(16b|8h|4s|2d)$")>; -def : InstRW<[WriteAdr, V2Write_6cyc_3L01_6V01], (instregex "ST3Threev(16b|8h|4s|2d)_POST$")>; +def : InstRW<[V2Write_6c_3L01_6V01], (instregex "ST3Threev(16b|8h|4s|2d)$")>; +def : InstRW<[WriteAdr, V2Write_6c_3L01_6V01], (instregex "ST3Threev(16b|8h|4s|2d)_POST$")>; // ASIMD store, 3 element, one lane, B/H // ASIMD store, 3 element, one lane, S // ASIMD store, 3 element, one lane, D -def : InstRW<[V2Write_5cyc_2L01_4V01], (instregex "ST3i(8|16|32|64)$")>; -def : InstRW<[WriteAdr, V2Write_5cyc_2L01_4V01], (instregex "ST3i(8|16|32|64)_POST$")>; +def : InstRW<[V2Write_5c_2L01_4V01], (instregex "ST3i(8|16|32|64)$")>; +def : InstRW<[WriteAdr, V2Write_5c_2L01_4V01], (instregex "ST3i(8|16|32|64)_POST$")>; // ASIMD store, 4 element, multiple, D-form, B/H/S -def : InstRW<[V2Write_6cyc_2L01_6V01], (instregex "ST4Fourv(8b|4h|2s)$")>; -def : InstRW<[WriteAdr, V2Write_6cyc_2L01_6V01], (instregex "ST4Fourv(8b|4h|2s)_POST$")>; +def : InstRW<[V2Write_6c_2L01_6V01], (instregex "ST4Fourv(8b|4h|2s)$")>; +def : InstRW<[WriteAdr, V2Write_6c_2L01_6V01], (instregex 
"ST4Fourv(8b|4h|2s)_POST$")>; // ASIMD store, 4 element, multiple, Q-form, B/H/S -def : InstRW<[V2Write_7cyc_4L01_12V01], (instregex "ST4Fourv(16b|8h|4s)$")>; -def : InstRW<[WriteAdr, V2Write_7cyc_4L01_12V01], (instregex "ST4Fourv(16b|8h|4s)_POST$")>; +def : InstRW<[V2Write_7c_4L01_12V01], (instregex "ST4Fourv(16b|8h|4s)$")>; +def : InstRW<[WriteAdr, V2Write_7c_4L01_12V01], (instregex "ST4Fourv(16b|8h|4s)_POST$")>; // ASIMD store, 4 element, multiple, Q-form, D -def : InstRW<[V2Write_5cyc_4L01_8V01], (instregex "ST4Fourv(2d)$")>; -def : InstRW<[WriteAdr, V2Write_5cyc_4L01_8V01], (instregex "ST4Fourv(2d)_POST$")>; +def : InstRW<[V2Write_5c_4L01_8V01], (instregex "ST4Fourv(2d)$")>; +def : InstRW<[WriteAdr, V2Write_5c_4L01_8V01], (instregex "ST4Fourv(2d)_POST$")>; // ASIMD store, 4 element, one lane, B/H/S -def : InstRW<[V2Write_6cyc_1L01_3V01], (instregex "ST4i(8|16|32)$")>; -def : InstRW<[WriteAdr, V2Write_6cyc_1L01_3V01], (instregex "ST4i(8|16|32)_POST$")>; +def : InstRW<[V2Write_6c_1L01_3V01], (instregex "ST4i(8|16|32)$")>; +def : InstRW<[WriteAdr, V2Write_6c_1L01_3V01], (instregex "ST4i(8|16|32)_POST$")>; // ASIMD store, 4 element, one lane, D -def : InstRW<[V2Write_4cyc_2L01_4V01], (instregex "ST4i(64)$")>; -def : InstRW<[WriteAdr, V2Write_4cyc_2L01_4V01], (instregex "ST4i(64)_POST$")>; +def : InstRW<[V2Write_4c_2L01_4V01], (instregex "ST4i(64)$")>; +def : InstRW<[WriteAdr, V2Write_4c_2L01_4V01], (instregex "ST4i(64)_POST$")>; // Cryptography extensions // ----------------------------------------------------------------------------- // Crypto AES ops -def : InstRW<[V2Write_2cyc_1V], (instregex "^AES[DE]rr$", "^AESI?MCrr")>; +def : InstRW<[V2Write_2c_1V], (instregex "^AES[DE]rr$", "^AESI?MCrr")>; // Crypto polynomial (64x64) multiply long -def : InstRW<[V2Write_2cyc_1V], (instrs PMULLv1i64, PMULLv2i64)>; +def : InstRW<[V2Write_2c_1V], (instrs PMULLv1i64, PMULLv2i64)>; // Crypto SHA1 hash acceleration op // Crypto SHA1 schedule acceleration ops -def : InstRW<[V2Write_2cyc_1V0], (instregex "^SHA1(H|SU0|SU1)")>; +def : InstRW<[V2Write_2c_1V0], (instregex "^SHA1(H|SU0|SU1)")>; // Crypto SHA1 hash acceleration ops // Crypto SHA256 hash acceleration ops -def : InstRW<[V2Write_4cyc_1V0], (instregex "^SHA1[CMP]", "^SHA256H2?")>; +def : InstRW<[V2Write_4c_1V0], (instregex "^SHA1[CMP]", "^SHA256H2?")>; // Crypto SHA256 schedule acceleration ops -def : InstRW<[V2Write_2cyc_1V0], (instregex "^SHA256SU[01]")>; +def : InstRW<[V2Write_2c_1V0], (instregex "^SHA256SU[01]")>; // Crypto SHA512 hash acceleration ops -def : InstRW<[V2Write_2cyc_1V0], (instregex "^SHA512(H|H2|SU0|SU1)")>; +def : InstRW<[V2Write_2c_1V0], (instregex "^SHA512(H|H2|SU0|SU1)")>; // Crypto SHA3 ops -def : InstRW<[V2Write_2cyc_1V0], (instrs BCAX, EOR3, RAX1, XAR)>; +def : InstRW<[V2Write_2c_1V0], (instrs BCAX, EOR3, RAX1, XAR)>; // Crypto SM3 ops -def : InstRW<[V2Write_2cyc_1V0], (instregex "^SM3PARTW[12]$", "^SM3SS1$", - "^SM3TT[12][AB]$")>; +def : InstRW<[V2Write_2c_1V0], (instregex "^SM3PARTW[12]$", "^SM3SS1$", + "^SM3TT[12][AB]$")>; // Crypto SM4 ops -def : InstRW<[V2Write_4cyc_1V0], (instrs SM4E, SM4ENCKEY)>; +def : InstRW<[V2Write_4c_1V0], (instrs SM4E, SM4ENCKEY)>; // CRC // ----------------------------------------------------------------------------- @@ -2004,31 +2004,31 @@ def : InstRW<[V2Wr_CRC, V2Rd_CRC], (instregex "^CRC32")>; // ----------------------------------------------------------------------------- // Loop control, based on predicate -def : InstRW<[V2Write_2or3cyc_1M], (instrs BRKA_PPmP, BRKA_PPzP, - BRKB_PPmP, 
BRKB_PPzP)>; +def : InstRW<[V2Write_2or3c_1M], (instrs BRKA_PPmP, BRKA_PPzP, + BRKB_PPmP, BRKB_PPzP)>; // Loop control, based on predicate and flag setting -def : InstRW<[V2Write_3or4cyc_2M], (instrs BRKAS_PPzP, BRKBS_PPzP)>; +def : InstRW<[V2Write_3or4c_2M], (instrs BRKAS_PPzP, BRKBS_PPzP)>; // Loop control, propagating -def : InstRW<[V2Write_2or3cyc_1M0], (instrs BRKN_PPzP, BRKPA_PPzPP, - BRKPB_PPzPP)>; +def : InstRW<[V2Write_2or3c_1M0], (instrs BRKN_PPzP, BRKPA_PPzPP, + BRKPB_PPzPP)>; // Loop control, propagating and flag setting -def : InstRW<[V2Write_3or4cyc_1M0_1M], (instrs BRKNS_PPzP, BRKPAS_PPzPP, - BRKPBS_PPzPP)>; +def : InstRW<[V2Write_3or4c_1M0_1M], (instrs BRKNS_PPzP, BRKPAS_PPzPP, + BRKPBS_PPzPP)>; // Loop control, based on GPR -def : InstRW<[V2Write_3cyc_2M], +def : InstRW<[V2Write_3c_2M], (instregex "^WHILE(GE|GT|HI|HS|LE|LO|LS|LT)_P(WW|XX)_[BHSD]")>; -def : InstRW<[V2Write_3cyc_2M], (instregex "^WHILE(RW|WR)_PXX_[BHSD]")>; +def : InstRW<[V2Write_3c_2M], (instregex "^WHILE(RW|WR)_PXX_[BHSD]")>; // Loop terminate -def : InstRW<[V2Write_1cyc_2M], (instregex "^CTERM(EQ|NE)_(WW|XX)")>; +def : InstRW<[V2Write_1c_2M], (instregex "^CTERM(EQ|NE)_(WW|XX)")>; // Predicate counting scalar -def : InstRW<[V2Write_2cyc_1M], (instrs ADDPL_XXI, ADDVL_XXI, RDVLI_XI)>; -def : InstRW<[V2Write_2cyc_1M], +def : InstRW<[V2Write_2c_1M], (instrs ADDPL_XXI, ADDVL_XXI, RDVLI_XI)>; +def : InstRW<[V2Write_2c_1M], (instregex "^(CNT|SQDEC|SQINC|UQDEC|UQINC)[BHWD]_XPiI", "^SQ(DEC|INC)[BHWD]_XPiWdI", "^UQ(DEC|INC)[BHWD]_WPiI")>; @@ -2037,57 +2037,57 @@ def : InstRW<[V2Write_2cyc_1M], def : InstRW<[V2Write_IncDec], (instregex "^(DEC|INC)[BHWD]_XPiI")>; // Predicate counting scalar, active predicate -def : InstRW<[V2Write_2cyc_1M], +def : InstRW<[V2Write_2c_1M], (instregex "^CNTP_XPP_[BHSD]", "^(DEC|INC|SQDEC|SQINC|UQDEC|UQINC)P_XP_[BHSD]", "^(UQDEC|UQINC)P_WP_[BHSD]", "^(SQDEC|SQINC)P_XPWd_[BHSD]")>; // Predicate counting vector, active predicate -def : InstRW<[V2Write_7cyc_1M_1M0_1V], +def : InstRW<[V2Write_7c_1M_1M0_1V], (instregex "^(DEC|INC|SQDEC|SQINC|UQDEC|UQINC)P_ZP_[HSD]")>; // Predicate logical -def : InstRW<[V2Write_1or2cyc_1M0], +def : InstRW<[V2Write_1or2c_1M0], (instregex "^(AND|BIC|EOR|NAND|NOR|ORN|ORR)_PPzPP")>; // Predicate logical, flag setting -def : InstRW<[V2Write_1or2cyc_1M0_1M], +def : InstRW<[V2Write_1or2c_1M0_1M], (instregex "^(ANDS|BICS|EORS|NANDS|NORS|ORNS|ORRS)_PPzPP")>; // Predicate reverse -def : InstRW<[V2Write_2cyc_1M], (instregex "^REV_PP_[BHSD]")>; +def : InstRW<[V2Write_2c_1M], (instregex "^REV_PP_[BHSD]")>; // Predicate select -def : InstRW<[V2Write_1cyc_1M0], (instrs SEL_PPPP)>; +def : InstRW<[V2Write_1c_1M0], (instrs SEL_PPPP)>; // Predicate set -def : InstRW<[V2Write_2cyc_1M], (instregex "^PFALSE", "^PTRUE_[BHSD]")>; +def : InstRW<[V2Write_2c_1M], (instregex "^PFALSE", "^PTRUE_[BHSD]")>; // Predicate set/initialize, set flags -def : InstRW<[V2Write_3cyc_2M], (instregex "^PTRUES_[BHSD]")>; +def : InstRW<[V2Write_3c_2M], (instregex "^PTRUES_[BHSD]")>; // Predicate find first/next -def : InstRW<[V2Write_2cyc_1M], (instregex "^PFIRST_B", "^PNEXT_[BHSD]")>; +def : InstRW<[V2Write_2c_1M], (instregex "^PFIRST_B", "^PNEXT_[BHSD]")>; // Predicate test -def : InstRW<[V2Write_1cyc_1M], (instrs PTEST_PP)>; +def : InstRW<[V2Write_1c_1M], (instrs PTEST_PP)>; // Predicate transpose -def : InstRW<[V2Write_2cyc_1M], (instregex "^TRN[12]_PPP_[BHSD]")>; +def : InstRW<[V2Write_2c_1M], (instregex "^TRN[12]_PPP_[BHSD]")>; // Predicate unpack and widen -def : InstRW<[V2Write_2cyc_1M], (instrs 
PUNPKHI_PP, PUNPKLO_PP)>; +def : InstRW<[V2Write_2c_1M], (instrs PUNPKHI_PP, PUNPKLO_PP)>; // Predicate zip/unzip -def : InstRW<[V2Write_2cyc_1M], (instregex "^(ZIP|UZP)[12]_PPP_[BHSD]")>; +def : InstRW<[V2Write_2c_1M], (instregex "^(ZIP|UZP)[12]_PPP_[BHSD]")>; // SVE integer instructions // ----------------------------------------------------------------------------- // Arithmetic, absolute diff -def : InstRW<[V2Write_2cyc_1V], (instregex "^[SU]ABD_ZPmZ_[BHSD]", - "^[SU]ABD_ZPZZ_[BHSD]")>; +def : InstRW<[V2Write_2c_1V], (instregex "^[SU]ABD_ZPmZ_[BHSD]", + "^[SU]ABD_ZPZZ_[BHSD]")>; // Arithmetic, absolute diff accum def : InstRW<[V2Wr_ZA, V2Rd_ZA], (instregex "^[SU]ABA_ZZZ_[BHSD]")>; @@ -2096,10 +2096,10 @@ def : InstRW<[V2Wr_ZA, V2Rd_ZA], (instregex "^[SU]ABA_ZZZ_[BHSD]")>; def : InstRW<[V2Wr_ZA, V2Rd_ZA], (instregex "^[SU]ABAL[TB]_ZZZ_[HSD]")>; // Arithmetic, absolute diff long -def : InstRW<[V2Write_2cyc_1V], (instregex "^[SU]ABDL[TB]_ZZZ_[HSD]")>; +def : InstRW<[V2Write_2c_1V], (instregex "^[SU]ABDL[TB]_ZZZ_[HSD]")>; // Arithmetic, basic -def : InstRW<[V2Write_2cyc_1V], +def : InstRW<[V2Write_2c_1V], (instregex "^(ABS|ADD|CNOT|NEG|SUB|SUBR)_ZPmZ_[BHSD]", "^(ADD|SUB)_ZZZ_[BHSD]", "^(ADD|SUB|SUBR)_ZPZZ_[BHSD]", @@ -2112,7 +2112,7 @@ def : InstRW<[V2Write_2cyc_1V], "^SSUBL(BT|TB)_ZZZ_[HSD]")>; // Arithmetic, complex -def : InstRW<[V2Write_2cyc_1V], +def : InstRW<[V2Write_2c_1V], (instregex "^R?(ADD|SUB)HN[BT]_ZZZ_[BHS]", "^SQ(ABS|ADD|NEG|SUB|SUBR)_ZPmZ_[BHSD]", "^[SU]Q(ADD|SUB)_ZZZ_[BHSD]", @@ -2121,17 +2121,17 @@ def : InstRW<[V2Write_2cyc_1V], "^(UQSUB|UQSUBR)_ZPmZ_[BHSD]")>; // Arithmetic, large integer -def : InstRW<[V2Write_2cyc_1V], (instregex "^(AD|SB)CL[BT]_ZZZ_[SD]")>; +def : InstRW<[V2Write_2c_1V], (instregex "^(AD|SB)CL[BT]_ZZZ_[SD]")>; // Arithmetic, pairwise add -def : InstRW<[V2Write_2cyc_1V], (instregex "^ADDP_ZPmZ_[BHSD]")>; +def : InstRW<[V2Write_2c_1V], (instregex "^ADDP_ZPmZ_[BHSD]")>; // Arithmetic, pairwise add and accum long def : InstRW<[V2Wr_ZPA, ReadDefault, V2Rd_ZPA], (instregex "^[SU]ADALP_ZPmZ_[HSD]")>; // Arithmetic, shift -def : InstRW<[V2Write_2cyc_1V13], +def : InstRW<[V2Write_2c_1V13], (instregex "^(ASR|LSL|LSR)_WIDE_ZPmZ_[BHS]", "^(ASR|LSL|LSR)_WIDE_ZZZ_[BHS]", "^(ASR|LSL|LSR)_ZPmI_[BHSD]", @@ -2144,14 +2144,14 @@ def : InstRW<[V2Write_2cyc_1V13], def : InstRW<[V2Wr_ZSA, V2Rd_ZSA], (instregex "^[SU]R?SRA_ZZI_[BHSD]")>; // Arithmetic, shift by immediate -def : InstRW<[V2Write_2cyc_1V13], (instregex "^SHRN[BT]_ZZI_[BHS]", - "^[SU]SHLL[BT]_ZZI_[HSD]")>; +def : InstRW<[V2Write_2c_1V13], (instregex "^SHRN[BT]_ZZI_[BHS]", + "^[SU]SHLL[BT]_ZZI_[HSD]")>; // Arithmetic, shift by immediate and insert -def : InstRW<[V2Write_2cyc_1V13], (instregex "^(SLI|SRI)_ZZI_[BHSD]")>; +def : InstRW<[V2Write_2c_1V13], (instregex "^(SLI|SRI)_ZZI_[BHSD]")>; // Arithmetic, shift complex -def : InstRW<[V2Write_4cyc_1V13], +def : InstRW<[V2Write_4c_1V13], (instregex "^(SQ)?RSHRU?N[BT]_ZZI_[BHS]", "^(SQRSHL|SQRSHLR|SQSHL|SQSHLR|UQRSHL|UQRSHLR|UQSHL|UQSHLR)_ZPmZ_[BHSD]", "^[SU]QR?SHL_ZPZZ_[BHSD]", @@ -2160,32 +2160,32 @@ def : InstRW<[V2Write_4cyc_1V13], "^UQR?SHRN[BT]_ZZI_[BHS]")>; // Arithmetic, shift right for divide -def : InstRW<[V2Write_4cyc_1V13], (instregex "^ASRD_(ZPmI|ZPZI)_[BHSD]")>; +def : InstRW<[V2Write_4c_1V13], (instregex "^ASRD_(ZPmI|ZPZI)_[BHSD]")>; // Arithmetic, shift rounding -def : InstRW<[V2Write_4cyc_1V13], (instregex "^[SU]RSHLR?_ZPmZ_[BHSD]", - "^[SU]RSHL_ZPZZ_[BHSD]", - "^[SU]RSHR_(ZPmI|ZPZI)_[BHSD]")>; +def : InstRW<[V2Write_4c_1V13], (instregex 
"^[SU]RSHLR?_ZPmZ_[BHSD]", + "^[SU]RSHL_ZPZZ_[BHSD]", + "^[SU]RSHR_(ZPmI|ZPZI)_[BHSD]")>; // Bit manipulation -def : InstRW<[V2Write_6cyc_2V1], (instregex "^(BDEP|BEXT|BGRP)_ZZZ_[BHSD]")>; +def : InstRW<[V2Write_6c_2V1], (instregex "^(BDEP|BEXT|BGRP)_ZZZ_[BHSD]")>; // Bitwise select -def : InstRW<[V2Write_2cyc_1V], (instregex "^(BSL|BSL1N|BSL2N|NBSL)_ZZZZ")>; +def : InstRW<[V2Write_2c_1V], (instregex "^(BSL|BSL1N|BSL2N|NBSL)_ZZZZ")>; // Count/reverse bits -def : InstRW<[V2Write_2cyc_1V], (instregex "^(CLS|CLZ|CNT|RBIT)_ZPmZ_[BHSD]")>; +def : InstRW<[V2Write_2c_1V], (instregex "^(CLS|CLZ|CNT|RBIT)_ZPmZ_[BHSD]")>; // Broadcast logical bitmask immediate to vector -def : InstRW<[V2Write_2cyc_1V], (instrs DUPM_ZI)>; +def : InstRW<[V2Write_2c_1V], (instrs DUPM_ZI)>; // Compare and set flags -def : InstRW<[V2Write_4or5cyc_1V0_1M0], +def : InstRW<[V2Write_4or5c_1V0_1M0], (instregex "^CMP(EQ|GE|GT|HI|HS|LE|LO|LS|LT|NE)_PPzZ[IZ]_[BHSD]", "^CMP(EQ|GE|GT|HI|HS|LE|LO|LS|LT|NE)_WIDE_PPzZZ_[BHS]")>; // Complex add -def : InstRW<[V2Write_2cyc_1V], (instregex "^(SQ)?CADD_ZZI_[BHSD]")>; +def : InstRW<[V2Write_2c_1V], (instregex "^(SQ)?CADD_ZZI_[BHSD]")>; // Complex dot product 8-bit element def : InstRW<[V2Wr_ZDOTB, V2Rd_ZDOTB], (instrs CDOT_ZZZ_S, CDOT_ZZZI_S)>; @@ -2201,37 +2201,37 @@ def : InstRW<[V2Wr_ZCMABHS, V2Rd_ZCMABHS], (instregex "^CMLA_ZZZ_[BHS]", def : InstRW<[V2Wr_ZCMAD, V2Rd_ZCMAD], (instrs CMLA_ZZZ_D)>; // Conditional extract operations, scalar form -def : InstRW<[V2Write_8cyc_1M0_1V01], (instregex "^CLAST[AB]_RPZ_[BHSD]")>; +def : InstRW<[V2Write_8c_1M0_1V01], (instregex "^CLAST[AB]_RPZ_[BHSD]")>; // Conditional extract operations, SIMD&FP scalar and vector forms -def : InstRW<[V2Write_3cyc_1V1], (instregex "^CLAST[AB]_[VZ]PZ_[BHSD]", - "^COMPACT_ZPZ_[SD]", - "^SPLICE_ZPZZ?_[BHSD]")>; +def : InstRW<[V2Write_3c_1V1], (instregex "^CLAST[AB]_[VZ]PZ_[BHSD]", + "^COMPACT_ZPZ_[SD]", + "^SPLICE_ZPZZ?_[BHSD]")>; // Convert to floating point, 64b to float or convert to double -def : InstRW<[V2Write_3cyc_1V02], (instregex "^[SU]CVTF_ZPmZ_Dto[HSD]", +def : InstRW<[V2Write_3c_1V02], (instregex "^[SU]CVTF_ZPmZ_Dto[HSD]", "^[SU]CVTF_ZPmZ_StoD")>; // Convert to floating point, 32b to single or half -def : InstRW<[V2Write_4cyc_2V02], (instregex "^[SU]CVTF_ZPmZ_Sto[HS]")>; +def : InstRW<[V2Write_4c_2V02], (instregex "^[SU]CVTF_ZPmZ_Sto[HS]")>; // Convert to floating point, 16b to half -def : InstRW<[V2Write_6cyc_4V02], (instregex "^[SU]CVTF_ZPmZ_HtoH")>; +def : InstRW<[V2Write_6c_4V02], (instregex "^[SU]CVTF_ZPmZ_HtoH")>; // Copy, scalar -def : InstRW<[V2Write_5cyc_1M0_1V], (instregex "^CPY_ZPmR_[BHSD]")>; +def : InstRW<[V2Write_5c_1M0_1V], (instregex "^CPY_ZPmR_[BHSD]")>; // Copy, scalar SIMD&FP or imm -def : InstRW<[V2Write_2cyc_1V], (instregex "^CPY_ZPm[IV]_[BHSD]", - "^CPY_ZPzI_[BHSD]")>; +def : InstRW<[V2Write_2c_1V], (instregex "^CPY_ZPm[IV]_[BHSD]", + "^CPY_ZPzI_[BHSD]")>; // Divides, 32 bit -def : InstRW<[V2Write_12cyc_1V0], (instregex "^[SU]DIVR?_ZPmZ_S", - "^[SU]DIV_ZPZZ_S")>; +def : InstRW<[V2Write_12c_1V0], (instregex "^[SU]DIVR?_ZPmZ_S", + "^[SU]DIV_ZPZZ_S")>; // Divides, 64 bit -def : InstRW<[V2Write_20cyc_1V0], (instregex "^[SU]DIVR?_ZPmZ_D", - "^[SU]DIV_ZPZZ_D")>; +def : InstRW<[V2Write_20c_1V0], (instregex "^[SU]DIVR?_ZPmZ_D", + "^[SU]DIV_ZPZZ_D")>; // Dot product, 8 bit def : InstRW<[V2Wr_ZDOTB, V2Rd_ZDOTB], (instregex "^[SU]DOT_ZZZI?_S")>; @@ -2243,52 +2243,52 @@ def : InstRW<[V2Wr_ZDOTB, V2Rd_ZDOTB], (instrs SUDOT_ZZZI, USDOT_ZZZI, USDOT_ZZZ def : InstRW<[V2Wr_ZDOTH, V2Rd_ZDOTH], 
(instregex "^[SU]DOT_ZZZI?_D")>; // Duplicate, immediate and indexed form -def : InstRW<[V2Write_2cyc_1V], (instregex "^DUP_ZI_[BHSD]", - "^DUP_ZZI_[BHSDQ]")>; +def : InstRW<[V2Write_2c_1V], (instregex "^DUP_ZI_[BHSD]", + "^DUP_ZZI_[BHSDQ]")>; // Duplicate, scalar form -def : InstRW<[V2Write_3cyc_1M0], (instregex "^DUP_ZR_[BHSD]")>; +def : InstRW<[V2Write_3c_1M0], (instregex "^DUP_ZR_[BHSD]")>; // Extend, sign or zero -def : InstRW<[V2Write_2cyc_1V13], (instregex "^[SU]XTB_ZPmZ_[HSD]", - "^[SU]XTH_ZPmZ_[SD]", - "^[SU]XTW_ZPmZ_[D]")>; +def : InstRW<[V2Write_2c_1V13], (instregex "^[SU]XTB_ZPmZ_[HSD]", + "^[SU]XTH_ZPmZ_[SD]", + "^[SU]XTW_ZPmZ_[D]")>; // Extract -def : InstRW<[V2Write_2cyc_1V], (instrs EXT_ZZI, EXT_ZZI_B)>; +def : InstRW<[V2Write_2c_1V], (instrs EXT_ZZI, EXT_ZZI_B)>; // Extract narrow saturating -def : InstRW<[V2Write_4cyc_1V13], (instregex "^[SU]QXTN[BT]_ZZ_[BHS]", - "^SQXTUN[BT]_ZZ_[BHS]")>; +def : InstRW<[V2Write_4c_1V13], (instregex "^[SU]QXTN[BT]_ZZ_[BHS]", + "^SQXTUN[BT]_ZZ_[BHS]")>; // Extract/insert operation, SIMD and FP scalar form -def : InstRW<[V2Write_3cyc_1V1], (instregex "^LAST[AB]_VPZ_[BHSD]", - "^INSR_ZV_[BHSD]")>; +def : InstRW<[V2Write_3c_1V1], (instregex "^LAST[AB]_VPZ_[BHSD]", + "^INSR_ZV_[BHSD]")>; // Extract/insert operation, scalar -def : InstRW<[V2Write_6cyc_1V1_1M0], (instregex "^LAST[AB]_RPZ_[BHSD]", - "^INSR_ZR_[BHSD]")>; +def : InstRW<[V2Write_6c_1V1_1M0], (instregex "^LAST[AB]_RPZ_[BHSD]", + "^INSR_ZR_[BHSD]")>; // Histogram operations -def : InstRW<[V2Write_2cyc_1V], (instregex "^HISTCNT_ZPzZZ_[SD]", - "^HISTSEG_ZZZ")>; +def : InstRW<[V2Write_2c_1V], (instregex "^HISTCNT_ZPzZZ_[SD]", + "^HISTSEG_ZZZ")>; // Horizontal operations, B, H, S form, immediate operands only -def : InstRW<[V2Write_4cyc_1V02], (instregex "^INDEX_II_[BHS]")>; +def : InstRW<[V2Write_4c_1V02], (instregex "^INDEX_II_[BHS]")>; // Horizontal operations, B, H, S form, scalar, immediate operands/ scalar // operands only / immediate, scalar operands -def : InstRW<[V2Write_7cyc_1M0_1V02], (instregex "^INDEX_(IR|RI|RR)_[BHS]")>; +def : InstRW<[V2Write_7c_1M0_1V02], (instregex "^INDEX_(IR|RI|RR)_[BHS]")>; // Horizontal operations, D form, immediate operands only -def : InstRW<[V2Write_5cyc_2V02], (instrs INDEX_II_D)>; +def : InstRW<[V2Write_5c_2V02], (instrs INDEX_II_D)>; // Horizontal operations, D form, scalar, immediate operands)/ scalar operands // only / immediate, scalar operands -def : InstRW<[V2Write_8cyc_2M0_2V02], (instregex "^INDEX_(IR|RI|RR)_D")>; +def : InstRW<[V2Write_8c_2M0_2V02], (instregex "^INDEX_(IR|RI|RR)_D")>; // Logical -def : InstRW<[V2Write_2cyc_1V], +def : InstRW<[V2Write_2c_1V], (instregex "^(AND|EOR|ORR)_ZI", "^(AND|BIC|EOR|ORR)_ZZZ", "^EOR(BT|TB)_ZZZ_[BHSD]", @@ -2296,37 +2296,37 @@ def : InstRW<[V2Write_2cyc_1V], "^NOT_ZPmZ_[BHSD]")>; // Max/min, basic and pairwise -def : InstRW<[V2Write_2cyc_1V], (instregex "^[SU](MAX|MIN)_ZI_[BHSD]", - "^[SU](MAX|MIN)P?_ZPmZ_[BHSD]", - "^[SU](MAX|MIN)_ZPZZ_[BHSD]")>; +def : InstRW<[V2Write_2c_1V], (instregex "^[SU](MAX|MIN)_ZI_[BHSD]", + "^[SU](MAX|MIN)P?_ZPmZ_[BHSD]", + "^[SU](MAX|MIN)_ZPZZ_[BHSD]")>; // Matching operations // FIXME: SOG p. 44, n. 5: If the consuming instruction has a flag source, the // latency for this instruction is 4 cycles. 
-def : InstRW<[V2Write_2or3cyc_1V0_1M], (instregex "^N?MATCH_PPzZZ_[BH]")>; +def : InstRW<[V2Write_2or3c_1V0_1M], (instregex "^N?MATCH_PPzZZ_[BH]")>; // Matrix multiply-accumulate def : InstRW<[V2Wr_ZMMA, V2Rd_ZMMA], (instrs SMMLA_ZZZ, UMMLA_ZZZ, USMMLA_ZZZ)>; // Move prefix -def : InstRW<[V2Write_2cyc_1V], (instregex "^MOVPRFX_ZP[mz]Z_[BHSD]", - "^MOVPRFX_ZZ")>; +def : InstRW<[V2Write_2c_1V], (instregex "^MOVPRFX_ZP[mz]Z_[BHSD]", + "^MOVPRFX_ZZ")>; // Multiply, B, H, S element size -def : InstRW<[V2Write_4cyc_1V02], (instregex "^MUL_(ZI|ZPmZ|ZZZI|ZZZ)_[BHS]", - "^MUL_ZPZZ_[BHS]", - "^[SU]MULH_(ZPmZ|ZZZ)_[BHS]", - "^[SU]MULH_ZPZZ_[BHS]")>; +def : InstRW<[V2Write_4c_1V02], (instregex "^MUL_(ZI|ZPmZ|ZZZI|ZZZ)_[BHS]", + "^MUL_ZPZZ_[BHS]", + "^[SU]MULH_(ZPmZ|ZZZ)_[BHS]", + "^[SU]MULH_ZPZZ_[BHS]")>; // Multiply, D element size -def : InstRW<[V2Write_5cyc_2V02], (instregex "^MUL_(ZI|ZPmZ|ZZZI|ZZZ)_D", - "^MUL_ZPZZ_D", - "^[SU]MULH_(ZPmZ|ZZZ)_D", - "^[SU]MULH_ZPZZ_D")>; +def : InstRW<[V2Write_5c_2V02], (instregex "^MUL_(ZI|ZPmZ|ZZZI|ZZZ)_D", + "^MUL_ZPZZ_D", + "^[SU]MULH_(ZPmZ|ZZZ)_D", + "^[SU]MULH_ZPZZ_D")>; // Multiply long -def : InstRW<[V2Write_4cyc_1V02], (instregex "^[SU]MULL[BT]_ZZZI_[SD]", - "^[SU]MULL[BT]_ZZZ_[HSD]")>; +def : InstRW<[V2Write_4c_1V02], (instregex "^[SU]MULL[BT]_ZZZI_[SD]", + "^[SU]MULL[BT]_ZZZ_[HSD]")>; // Multiply accumulate, B, H, S element size def : InstRW<[V2Wr_ZMABHS, V2Rd_ZMABHS], @@ -2350,15 +2350,15 @@ def : InstRW<[V2Wr_ZMASQL, V2Rd_ZMASQ], "^SQDML[AS]L[BT]_ZZZI_[SD]")>; // Multiply saturating doubling high, B, H, S element size -def : InstRW<[V2Write_4cyc_1V02], (instregex "^SQDMULH_ZZZ_[BHS]", - "^SQDMULH_ZZZI_[HS]")>; +def : InstRW<[V2Write_4c_1V02], (instregex "^SQDMULH_ZZZ_[BHS]", + "^SQDMULH_ZZZI_[HS]")>; // Multiply saturating doubling high, D element size -def : InstRW<[V2Write_5cyc_2V02], (instrs SQDMULH_ZZZ_D, SQDMULH_ZZZI_D)>; +def : InstRW<[V2Write_5c_2V02], (instrs SQDMULH_ZZZ_D, SQDMULH_ZZZI_D)>; // Multiply saturating doubling long -def : InstRW<[V2Write_4cyc_1V02], (instregex "^SQDMULL[BT]_ZZZ_[HSD]", - "^SQDMULL[BT]_ZZZI_[SD]")>; +def : InstRW<[V2Write_4c_1V02], (instregex "^SQDMULL[BT]_ZZZ_[HSD]", + "^SQDMULL[BT]_ZZZI_[SD]")>; // Multiply saturating rounding doubling regular/complex accumulate, B, H, S // element size @@ -2373,157 +2373,157 @@ def : InstRW<[V2Wr_ZMASQD, V2Rd_ZMASQ], (instregex "^SQRDML[AS]H_ZZZI?_D", "^SQRDCMLAH_ZZZ_D")>; // Multiply saturating rounding doubling regular/complex, B, H, S element size -def : InstRW<[V2Write_4cyc_1V02], (instregex "^SQRDMULH_ZZZ_[BHS]", - "^SQRDMULH_ZZZI_[HS]")>; +def : InstRW<[V2Write_4c_1V02], (instregex "^SQRDMULH_ZZZ_[BHS]", + "^SQRDMULH_ZZZI_[HS]")>; // Multiply saturating rounding doubling regular/complex, D element size -def : InstRW<[V2Write_5cyc_2V02], (instregex "^SQRDMULH_ZZZI?_D")>; +def : InstRW<[V2Write_5c_2V02], (instregex "^SQRDMULH_ZZZI?_D")>; // Multiply/multiply long, (8x8) polynomial -def : InstRW<[V2Write_2cyc_1V23], (instregex "^PMUL_ZZZ_B", - "^PMULL[BT]_ZZZ_[HDQ]")>; +def : InstRW<[V2Write_2c_1V23], (instregex "^PMUL_ZZZ_B", + "^PMULL[BT]_ZZZ_[HDQ]")>; // Predicate counting vector -def : InstRW<[V2Write_2cyc_1V], (instregex "^([SU]Q)?(DEC|INC)[HWD]_ZPiI")>; +def : InstRW<[V2Write_2c_1V], (instregex "^([SU]Q)?(DEC|INC)[HWD]_ZPiI")>; // Reciprocal estimate -def : InstRW<[V2Write_4cyc_2V02], (instregex "^URECPE_ZPmZ_S", "^URSQRTE_ZPmZ_S")>; +def : InstRW<[V2Write_4c_2V02], (instregex "^URECPE_ZPmZ_S", "^URSQRTE_ZPmZ_S")>; // Reduction, arithmetic, B form -def : 
InstRW<[V2Write_9cyc_2V_4V13], (instregex "^[SU](ADD|MAX|MIN)V_VPZ_B")>; +def : InstRW<[V2Write_9c_2V_4V13], (instregex "^[SU](ADD|MAX|MIN)V_VPZ_B")>; // Reduction, arithmetic, H form -def : InstRW<[V2Write_8cyc_2V_2V13], (instregex "^[SU](ADD|MAX|MIN)V_VPZ_H")>; +def : InstRW<[V2Write_8c_2V_2V13], (instregex "^[SU](ADD|MAX|MIN)V_VPZ_H")>; // Reduction, arithmetic, S form -def : InstRW<[V2Write_6cyc_2V_2V13], (instregex "^[SU](ADD|MAX|MIN)V_VPZ_S")>; +def : InstRW<[V2Write_6c_2V_2V13], (instregex "^[SU](ADD|MAX|MIN)V_VPZ_S")>; // Reduction, arithmetic, D form -def : InstRW<[V2Write_4cyc_2V], (instregex "^[SU](ADD|MAX|MIN)V_VPZ_D")>; +def : InstRW<[V2Write_4c_2V], (instregex "^[SU](ADD|MAX|MIN)V_VPZ_D")>; // Reduction, logical -def : InstRW<[V2Write_6cyc_1V_1V13], (instregex "^(AND|EOR|OR)V_VPZ_[BHSD]")>; +def : InstRW<[V2Write_6c_1V_1V13], (instregex "^(AND|EOR|OR)V_VPZ_[BHSD]")>; // Reverse, vector -def : InstRW<[V2Write_2cyc_1V], (instregex "^REV_ZZ_[BHSD]", - "^REVB_ZPmZ_[HSD]", - "^REVH_ZPmZ_[SD]", - "^REVW_ZPmZ_D")>; +def : InstRW<[V2Write_2c_1V], (instregex "^REV_ZZ_[BHSD]", + "^REVB_ZPmZ_[HSD]", + "^REVH_ZPmZ_[SD]", + "^REVW_ZPmZ_D")>; // Select, vector form -def : InstRW<[V2Write_2cyc_1V], (instregex "^SEL_ZPZZ_[BHSD]")>; +def : InstRW<[V2Write_2c_1V], (instregex "^SEL_ZPZZ_[BHSD]")>; // Table lookup -def : InstRW<[V2Write_2cyc_1V], (instregex "^TBL_ZZZZ?_[BHSD]")>; +def : InstRW<[V2Write_2c_1V], (instregex "^TBL_ZZZZ?_[BHSD]")>; // Table lookup extension -def : InstRW<[V2Write_2cyc_1V], (instregex "^TBX_ZZZ_[BHSD]")>; +def : InstRW<[V2Write_2c_1V], (instregex "^TBX_ZZZ_[BHSD]")>; // Transpose, vector form -def : InstRW<[V2Write_2cyc_1V], (instregex "^TRN[12]_ZZZ_[BHSDQ]")>; +def : InstRW<[V2Write_2c_1V], (instregex "^TRN[12]_ZZZ_[BHSDQ]")>; // Unpack and extend -def : InstRW<[V2Write_2cyc_1V], (instregex "^[SU]UNPK(HI|LO)_ZZ_[HSD]")>; +def : InstRW<[V2Write_2c_1V], (instregex "^[SU]UNPK(HI|LO)_ZZ_[HSD]")>; // Zip/unzip -def : InstRW<[V2Write_2cyc_1V], (instregex "^(UZP|ZIP)[12]_ZZZ_[BHSDQ]")>; +def : InstRW<[V2Write_2c_1V], (instregex "^(UZP|ZIP)[12]_ZZZ_[BHSDQ]")>; // SVE floating-point instructions // ----------------------------------------------------------------------------- // Floating point absolute value/difference -def : InstRW<[V2Write_2cyc_1V], (instregex "^FAB[SD]_ZPmZ_[HSD]", - "^FABD_ZPZZ_[HSD]", - "^FABS_ZPmZ_[HSD]")>; +def : InstRW<[V2Write_2c_1V], (instregex "^FAB[SD]_ZPmZ_[HSD]", + "^FABD_ZPZZ_[HSD]", + "^FABS_ZPmZ_[HSD]")>; // Floating point arithmetic -def : InstRW<[V2Write_2cyc_1V], (instregex "^F(ADD|SUB)_(ZPm[IZ]|ZZZ)_[HSD]", - "^F(ADD|SUB)_ZPZ[IZ]_[HSD]", - "^FADDP_ZPmZZ_[HSD]", - "^FNEG_ZPmZ_[HSD]", - "^FSUBR_ZPm[IZ]_[HSD]", - "^FSUBR_(ZPZI|ZPZZ)_[HSD]")>; +def : InstRW<[V2Write_2c_1V], (instregex "^F(ADD|SUB)_(ZPm[IZ]|ZZZ)_[HSD]", + "^F(ADD|SUB)_ZPZ[IZ]_[HSD]", + "^FADDP_ZPmZZ_[HSD]", + "^FNEG_ZPmZ_[HSD]", + "^FSUBR_ZPm[IZ]_[HSD]", + "^FSUBR_(ZPZI|ZPZZ)_[HSD]")>; // Floating point associative add, F16 -def : InstRW<[V2Write_10cyc_1V1_9rc], (instrs FADDA_VPZ_H)>; +def : InstRW<[V2Write_10c_1V1_9rc], (instrs FADDA_VPZ_H)>; // Floating point associative add, F32 -def : InstRW<[V2Write_6cyc_1V1_5rc], (instrs FADDA_VPZ_S)>; +def : InstRW<[V2Write_6c_1V1_5rc], (instrs FADDA_VPZ_S)>; // Floating point associative add, F64 -def : InstRW<[V2Write_4cyc_1V], (instrs FADDA_VPZ_D)>; +def : InstRW<[V2Write_4c_1V], (instrs FADDA_VPZ_D)>; // Floating point compare -def : InstRW<[V2Write_2cyc_1V0], (instregex "^FACG[ET]_PPzZZ_[HSD]", - "^FCM(EQ|GE|GT|NE)_PPzZ[0Z]_[HSD]", 
- "^FCM(LE|LT)_PPzZ0_[HSD]", - "^FCMUO_PPzZZ_[HSD]")>; +def : InstRW<[V2Write_2c_1V0], (instregex "^FACG[ET]_PPzZZ_[HSD]", + "^FCM(EQ|GE|GT|NE)_PPzZ[0Z]_[HSD]", + "^FCM(LE|LT)_PPzZ0_[HSD]", + "^FCMUO_PPzZZ_[HSD]")>; // Floating point complex add -def : InstRW<[V2Write_3cyc_1V], (instregex "^FCADD_ZPmZ_[HSD]")>; +def : InstRW<[V2Write_3c_1V], (instregex "^FCADD_ZPmZ_[HSD]")>; // Floating point complex multiply add def : InstRW<[V2Wr_ZFCMA, ReadDefault, V2Rd_ZFCMA], (instregex "^FCMLA_ZPmZZ_[HSD]")>; def : InstRW<[V2Wr_ZFCMA, V2Rd_ZFCMA], (instregex "^FCMLA_ZZZI_[HS]")>; // Floating point convert, long or narrow (F16 to F32 or F32 to F16) -def : InstRW<[V2Write_4cyc_2V02], (instregex "^FCVT_ZPmZ_(HtoS|StoH)", - "^FCVTLT_ZPmZ_HtoS", - "^FCVTNT_ZPmZ_StoH")>; +def : InstRW<[V2Write_4c_2V02], (instregex "^FCVT_ZPmZ_(HtoS|StoH)", + "^FCVTLT_ZPmZ_HtoS", + "^FCVTNT_ZPmZ_StoH")>; // Floating point convert, long or narrow (F16 to F64, F32 to F64, F64 to F32 // or F64 to F16) -def : InstRW<[V2Write_3cyc_1V02], (instregex "^FCVT_ZPmZ_(HtoD|StoD|DtoS|DtoH)", - "^FCVTLT_ZPmZ_StoD", - "^FCVTNT_ZPmZ_DtoS")>; +def : InstRW<[V2Write_3c_1V02], (instregex "^FCVT_ZPmZ_(HtoD|StoD|DtoS|DtoH)", + "^FCVTLT_ZPmZ_StoD", + "^FCVTNT_ZPmZ_DtoS")>; // Floating point convert, round to odd -def : InstRW<[V2Write_3cyc_1V02], (instrs FCVTX_ZPmZ_DtoS, FCVTXNT_ZPmZ_DtoS)>; +def : InstRW<[V2Write_3c_1V02], (instrs FCVTX_ZPmZ_DtoS, FCVTXNT_ZPmZ_DtoS)>; // Floating point base2 log, F16 -def : InstRW<[V2Write_6cyc_4V02], (instregex "^FLOGB_(ZPmZ|ZPZZ)_H")>; +def : InstRW<[V2Write_6c_4V02], (instregex "^FLOGB_(ZPmZ|ZPZZ)_H")>; // Floating point base2 log, F32 -def : InstRW<[V2Write_4cyc_2V02], (instregex "^FLOGB_(ZPmZ|ZPZZ)_S")>; +def : InstRW<[V2Write_4c_2V02], (instregex "^FLOGB_(ZPmZ|ZPZZ)_S")>; // Floating point base2 log, F64 -def : InstRW<[V2Write_3cyc_1V02], (instregex "^FLOGB_(ZPmZ|ZPZZ)_D")>; +def : InstRW<[V2Write_3c_1V02], (instregex "^FLOGB_(ZPmZ|ZPZZ)_D")>; // Floating point convert to integer, F16 -def : InstRW<[V2Write_6cyc_4V02], (instregex "^FCVTZ[SU]_ZPmZ_HtoH")>; +def : InstRW<[V2Write_6c_4V02], (instregex "^FCVTZ[SU]_ZPmZ_HtoH")>; // Floating point convert to integer, F32 -def : InstRW<[V2Write_4cyc_2V02], (instregex "^FCVTZ[SU]_ZPmZ_(HtoS|StoS)")>; +def : InstRW<[V2Write_4c_2V02], (instregex "^FCVTZ[SU]_ZPmZ_(HtoS|StoS)")>; // Floating point convert to integer, F64 -def : InstRW<[V2Write_3cyc_1V02], +def : InstRW<[V2Write_3c_1V02], (instregex "^FCVTZ[SU]_ZPmZ_(HtoD|StoD|DtoS|DtoD)")>; // Floating point copy -def : InstRW<[V2Write_2cyc_1V], (instregex "^FCPY_ZPmI_[HSD]", - "^FDUP_ZI_[HSD]")>; +def : InstRW<[V2Write_2c_1V], (instregex "^FCPY_ZPmI_[HSD]", + "^FDUP_ZI_[HSD]")>; // Floating point divide, F16 -def : InstRW<[V2Write_13cyc_1V02_12rc], (instregex "^FDIVR?_(ZPmZ|ZPZZ)_H")>; +def : InstRW<[V2Write_13c_1V02_12rc], (instregex "^FDIVR?_(ZPmZ|ZPZZ)_H")>; // Floating point divide, F32 -def : InstRW<[V2Write_10cyc_1V02_9rc], (instregex "^FDIVR?_(ZPmZ|ZPZZ)_S")>; +def : InstRW<[V2Write_10c_1V02_9rc], (instregex "^FDIVR?_(ZPmZ|ZPZZ)_S")>; // Floating point divide, F64 -def : InstRW<[V2Write_15cyc_1V02_14rc], (instregex "^FDIVR?_(ZPmZ|ZPZZ)_D")>; +def : InstRW<[V2Write_15c_1V02_14rc], (instregex "^FDIVR?_(ZPmZ|ZPZZ)_D")>; // Floating point min/max pairwise -def : InstRW<[V2Write_2cyc_1V], (instregex "^F(MAX|MIN)(NM)?P_ZPmZZ_[HSD]")>; +def : InstRW<[V2Write_2c_1V], (instregex "^F(MAX|MIN)(NM)?P_ZPmZZ_[HSD]")>; // Floating point min/max -def : InstRW<[V2Write_2cyc_1V], (instregex "^F(MAX|MIN)(NM)?_ZPm[IZ]_[HSD]", - 
"^F(MAX|MIN)(NM)?_ZPZ[IZ]_[HSD]")>; +def : InstRW<[V2Write_2c_1V], (instregex "^F(MAX|MIN)(NM)?_ZPm[IZ]_[HSD]", + "^F(MAX|MIN)(NM)?_ZPZ[IZ]_[HSD]")>; // Floating point multiply -def : InstRW<[V2Write_3cyc_1V], (instregex "^(FSCALE|FMULX)_ZPmZ_[HSD]", - "^FMULX_ZPZZ_[HSD]", - "^FMUL_(ZPm[IZ]|ZZZI?)_[HSD]", - "^FMUL_ZPZ[IZ]_[HSD]")>; +def : InstRW<[V2Write_3c_1V], (instregex "^(FSCALE|FMULX)_ZPmZ_[HSD]", + "^FMULX_ZPZZ_[HSD]", + "^FMUL_(ZPm[IZ]|ZZZI?)_[HSD]", + "^FMUL_ZPZ[IZ]_[HSD]")>; // Floating point multiply accumulate def : InstRW<[V2Wr_ZFMA, ReadDefault, V2Rd_ZFMA], @@ -2537,61 +2537,61 @@ def : InstRW<[V2Wr_ZFMA, V2Rd_ZFMA], def : InstRW<[V2Wr_ZFMAL, V2Rd_ZFMAL], (instregex "^FML[AS]L[BT]_ZZZI?_SHH")>; // Floating point reciprocal estimate, F16 -def : InstRW<[V2Write_6cyc_4V02], (instregex "^FR(ECP|SQRT)E_ZZ_H", "^FRECPX_ZPmZ_H")>; +def : InstRW<[V2Write_6c_4V02], (instregex "^FR(ECP|SQRT)E_ZZ_H", "^FRECPX_ZPmZ_H")>; // Floating point reciprocal estimate, F32 -def : InstRW<[V2Write_4cyc_2V02], (instregex "^FR(ECP|SQRT)E_ZZ_S", "^FRECPX_ZPmZ_S")>; +def : InstRW<[V2Write_4c_2V02], (instregex "^FR(ECP|SQRT)E_ZZ_S", "^FRECPX_ZPmZ_S")>; // Floating point reciprocal estimate, F64 -def : InstRW<[V2Write_3cyc_1V02], (instregex "^FR(ECP|SQRT)E_ZZ_D", "^FRECPX_ZPmZ_D")>; +def : InstRW<[V2Write_3c_1V02], (instregex "^FR(ECP|SQRT)E_ZZ_D", "^FRECPX_ZPmZ_D")>; // Floating point reciprocal step -def : InstRW<[V2Write_4cyc_1V], (instregex "^F(RECPS|RSQRTS)_ZZZ_[HSD]")>; +def : InstRW<[V2Write_4c_1V], (instregex "^F(RECPS|RSQRTS)_ZZZ_[HSD]")>; // Floating point reduction, F16 -def : InstRW<[V2Write_8cyc_4V], +def : InstRW<[V2Write_8c_4V], (instregex "^(FADDV|FMAXNMV|FMAXV|FMINNMV|FMINV)_VPZ_H")>; // Floating point reduction, F32 -def : InstRW<[V2Write_6cyc_3V], +def : InstRW<[V2Write_6c_3V], (instregex "^(FADDV|FMAXNMV|FMAXV|FMINNMV|FMINV)_VPZ_S")>; // Floating point reduction, F64 -def : InstRW<[V2Write_4cyc_2V], +def : InstRW<[V2Write_4c_2V], (instregex "^(FADDV|FMAXNMV|FMAXV|FMINNMV|FMINV)_VPZ_D")>; // Floating point round to integral, F16 -def : InstRW<[V2Write_6cyc_4V02], (instregex "^FRINT[AIMNPXZ]_ZPmZ_H")>; +def : InstRW<[V2Write_6c_4V02], (instregex "^FRINT[AIMNPXZ]_ZPmZ_H")>; // Floating point round to integral, F32 -def : InstRW<[V2Write_4cyc_2V02], (instregex "^FRINT[AIMNPXZ]_ZPmZ_S")>; +def : InstRW<[V2Write_4c_2V02], (instregex "^FRINT[AIMNPXZ]_ZPmZ_S")>; // Floating point round to integral, F64 -def : InstRW<[V2Write_3cyc_1V02], (instregex "^FRINT[AIMNPXZ]_ZPmZ_D")>; +def : InstRW<[V2Write_3c_1V02], (instregex "^FRINT[AIMNPXZ]_ZPmZ_D")>; // Floating point square root, F16 -def : InstRW<[V2Write_13cyc_1V02_12rc], (instregex "^FSQRT_ZPmZ_H")>; +def : InstRW<[V2Write_13c_1V02_12rc], (instregex "^FSQRT_ZPmZ_H")>; // Floating point square root, F32 -def : InstRW<[V2Write_10cyc_1V02_9rc], (instregex "^FSQRT_ZPmZ_S")>; +def : InstRW<[V2Write_10c_1V02_9rc], (instregex "^FSQRT_ZPmZ_S")>; // Floating point square root, F64 -def : InstRW<[V2Write_16cyc_1V02_14rc], (instregex "^FSQRT_ZPmZ_D")>; +def : InstRW<[V2Write_16c_1V02_14rc], (instregex "^FSQRT_ZPmZ_D")>; // Floating point trigonometric exponentiation -def : InstRW<[V2Write_3cyc_1V1], (instregex "^FEXPA_ZZ_[HSD]")>; +def : InstRW<[V2Write_3c_1V1], (instregex "^FEXPA_ZZ_[HSD]")>; // Floating point trigonometric multiply add -def : InstRW<[V2Write_4cyc_1V], (instregex "^FTMAD_ZZI_[HSD]")>; +def : InstRW<[V2Write_4c_1V], (instregex "^FTMAD_ZZI_[HSD]")>; // Floating point trigonometric, miscellaneous -def : InstRW<[V2Write_3cyc_1V], (instregex 
"^FTS(MUL|SEL)_ZZZ_[HSD]")>; +def : InstRW<[V2Write_3c_1V], (instregex "^FTS(MUL|SEL)_ZZZ_[HSD]")>; // SVE BFloat16 (BF16) instructions // ----------------------------------------------------------------------------- // Convert, F32 to BF16 -def : InstRW<[V2Write_4cyc_1V02], (instrs BFCVT_ZPmZ, BFCVTNT_ZPmZ)>; +def : InstRW<[V2Write_4c_1V02], (instrs BFCVT_ZPmZ, BFCVTNT_ZPmZ)>; // Dot product def : InstRW<[V2Wr_ZBFDOT, V2Rd_ZBFDOT], (instrs BFDOT_ZZI, BFDOT_ZZZ)>; @@ -2606,101 +2606,101 @@ def : InstRW<[V2Wr_ZBFMAL, V2Rd_ZBFMAL], (instregex "^BFMLAL[BT]_ZZZI?")>; // ----------------------------------------------------------------------------- // Load vector -def : InstRW<[V2Write_6cyc_1L], (instrs LDR_ZXI)>; +def : InstRW<[V2Write_6c_1L], (instrs LDR_ZXI)>; // Load predicate -def : InstRW<[V2Write_6cyc_1L_1M], (instrs LDR_PXI)>; +def : InstRW<[V2Write_6c_1L_1M], (instrs LDR_PXI)>; // Contiguous load, scalar + imm -def : InstRW<[V2Write_6cyc_1L], (instregex "^LD1[BHWD]_IMM$", - "^LD1S?B_[HSD]_IMM$", - "^LD1S?H_[SD]_IMM$", - "^LD1S?W_D_IMM$" )>; +def : InstRW<[V2Write_6c_1L], (instregex "^LD1[BHWD]_IMM$", + "^LD1S?B_[HSD]_IMM$", + "^LD1S?H_[SD]_IMM$", + "^LD1S?W_D_IMM$" )>; // Contiguous load, scalar + scalar -def : InstRW<[V2Write_6cyc_1L], (instregex "^LD1[BHWD]$", - "^LD1S?B_[HSD]$", - "^LD1S?H_[SD]$", - "^LD1S?W_D$" )>; +def : InstRW<[V2Write_6c_1L], (instregex "^LD1[BHWD]$", + "^LD1S?B_[HSD]$", + "^LD1S?H_[SD]$", + "^LD1S?W_D$" )>; // Contiguous load broadcast, scalar + imm -def : InstRW<[V2Write_6cyc_1L], (instregex "^LD1R[BHWD]_IMM$", - "^LD1RS?B_[HSD]_IMM$", - "^LD1RS?H_[SD]_IMM$", - "^LD1RW_D_IMM$", - "^LD1RSW_IMM$", - "^LD1RQ_[BHWD]_IMM$")>; +def : InstRW<[V2Write_6c_1L], (instregex "^LD1R[BHWD]_IMM$", + "^LD1RS?B_[HSD]_IMM$", + "^LD1RS?H_[SD]_IMM$", + "^LD1RW_D_IMM$", + "^LD1RSW_IMM$", + "^LD1RQ_[BHWD]_IMM$")>; // Contiguous load broadcast, scalar + scalar -def : InstRW<[V2Write_6cyc_1L], (instregex "^LD1RQ_[BHWD]$")>; +def : InstRW<[V2Write_6c_1L], (instregex "^LD1RQ_[BHWD]$")>; // Non temporal load, scalar + imm // Non temporal load, scalar + scalar -def : InstRW<[V2Write_6cyc_1L], (instregex "^LDNT1[BHWD]_ZR[IR]$")>; +def : InstRW<[V2Write_6c_1L], (instregex "^LDNT1[BHWD]_ZR[IR]$")>; // Non temporal gather load, vector + scalar 32-bit element size -def : InstRW<[V2Write_9cyc_2L_4V], (instregex "^LDNT1[BHW]_ZZR_S$", - "^LDNT1S[BH]_ZZR_S$")>; +def : InstRW<[V2Write_9c_2L_4V], (instregex "^LDNT1[BHW]_ZZR_S$", + "^LDNT1S[BH]_ZZR_S$")>; // Non temporal gather load, vector + scalar 64-bit element size -def : InstRW<[V2Write_9cyc_2L_2V1], (instregex "^LDNT1S?[BHW]_ZZR_D$")>; -def : InstRW<[V2Write_9cyc_2L_2V1], (instrs LDNT1D_ZZR_D)>; +def : InstRW<[V2Write_9c_2L_2V1], (instregex "^LDNT1S?[BHW]_ZZR_D$")>; +def : InstRW<[V2Write_9c_2L_2V1], (instrs LDNT1D_ZZR_D)>; // Contiguous first faulting load, scalar + scalar -def : InstRW<[V2Write_6cyc_1L_1S], (instregex "^LDFF1[BHWD]$", - "^LDFF1S?B_[HSD]$", - "^LDFF1S?H_[SD]$", - "^LDFF1S?W_D$")>; +def : InstRW<[V2Write_6c_1L_1S], (instregex "^LDFF1[BHWD]$", + "^LDFF1S?B_[HSD]$", + "^LDFF1S?H_[SD]$", + "^LDFF1S?W_D$")>; // Contiguous non faulting load, scalar + imm -def : InstRW<[V2Write_6cyc_1L], (instregex "^LDNF1[BHWD]_IMM$", - "^LDNF1S?B_[HSD]_IMM$", - "^LDNF1S?H_[SD]_IMM$", - "^LDNF1S?W_D_IMM$")>; +def : InstRW<[V2Write_6c_1L], (instregex "^LDNF1[BHWD]_IMM$", + "^LDNF1S?B_[HSD]_IMM$", + "^LDNF1S?H_[SD]_IMM$", + "^LDNF1S?W_D_IMM$")>; // Contiguous Load two structures to two vectors, scalar + imm -def : InstRW<[V2Write_8cyc_2L_2V], 
(instregex "^LD2[BHWD]_IMM$")>; +def : InstRW<[V2Write_8c_2L_2V], (instregex "^LD2[BHWD]_IMM$")>; // Contiguous Load two structures to two vectors, scalar + scalar -def : InstRW<[V2Write_9cyc_2L_2V_2S], (instregex "^LD2[BHWD]$")>; +def : InstRW<[V2Write_9c_2L_2V_2S], (instregex "^LD2[BHWD]$")>; // Contiguous Load three structures to three vectors, scalar + imm -def : InstRW<[V2Write_9cyc_3L_3V], (instregex "^LD3[BHWD]_IMM$")>; +def : InstRW<[V2Write_9c_3L_3V], (instregex "^LD3[BHWD]_IMM$")>; // Contiguous Load three structures to three vectors, scalar + scalar -def : InstRW<[V2Write_10cyc_3V_3L_3S], (instregex "^LD3[BHWD]$")>; +def : InstRW<[V2Write_10c_3V_3L_3S], (instregex "^LD3[BHWD]$")>; // Contiguous Load four structures to four vectors, scalar + imm -def : InstRW<[V2Write_9cyc_4L_8V], (instregex "^LD4[BHWD]_IMM$")>; +def : InstRW<[V2Write_9c_4L_8V], (instregex "^LD4[BHWD]_IMM$")>; // Contiguous Load four structures to four vectors, scalar + scalar -def : InstRW<[V2Write_10cyc_4L_8V_4S], (instregex "^LD4[BHWD]$")>; +def : InstRW<[V2Write_10c_4L_8V_4S], (instregex "^LD4[BHWD]$")>; // Gather load, vector + imm, 32-bit element size -def : InstRW<[V2Write_9cyc_1L_4V], (instregex "^GLD(FF)?1S?[BH]_S_IMM$", - "^GLD(FF)?1W_IMM$")>; +def : InstRW<[V2Write_9c_1L_4V], (instregex "^GLD(FF)?1S?[BH]_S_IMM$", + "^GLD(FF)?1W_IMM$")>; // Gather load, vector + imm, 64-bit element size -def : InstRW<[V2Write_9cyc_1L_4V], (instregex "^GLD(FF)?1S?[BHW]_D_IMM$", - "^GLD(FF)?1D_IMM$")>; +def : InstRW<[V2Write_9c_1L_4V], (instregex "^GLD(FF)?1S?[BHW]_D_IMM$", + "^GLD(FF)?1D_IMM$")>; // Gather load, 32-bit scaled offset -def : InstRW<[V2Write_10cyc_1L_8V], +def : InstRW<[V2Write_10c_1L_8V], (instregex "^GLD(FF)?1S?H_S_[SU]XTW_SCALED$", "^GLD(FF)?1W_[SU]XTW_SCALED")>; // Gather load, 64-bit scaled offset // NOTE: These instructions are not specified in the SOG. -def : InstRW<[V2Write_10cyc_1L_4V], +def : InstRW<[V2Write_10c_1L_4V], (instregex "^GLD(FF)?1S?[HW]_D_([SU]XTW_)?SCALED$", "^GLD(FF)?1D_([SU]XTW_)?SCALED$")>; // Gather load, 32-bit unpacked unscaled offset -def : InstRW<[V2Write_9cyc_1L_4V], (instregex "^GLD(FF)?1S?[BH]_S_[SU]XTW$", - "^GLD(FF)?1W_[SU]XTW$")>; +def : InstRW<[V2Write_9c_1L_4V], (instregex "^GLD(FF)?1S?[BH]_S_[SU]XTW$", + "^GLD(FF)?1W_[SU]XTW$")>; // Gather load, 64-bit unpacked unscaled offset // NOTE: These instructions are not specified in the SOG. 
-def : InstRW<[V2Write_9cyc_1L_2V], +def : InstRW<[V2Write_9c_1L_2V], (instregex "^GLD(FF)?1S?[BHW]_D(_[SU]XTW)?$", "^GLD(FF)?1D(_[SU]XTW)?$")>; @@ -2708,120 +2708,120 @@ def : InstRW<[V2Write_9cyc_1L_2V], // ----------------------------------------------------------------------------- // Store from predicate reg -def : InstRW<[V2Write_1cyc_1L01], (instrs STR_PXI)>; +def : InstRW<[V2Write_1c_1L01], (instrs STR_PXI)>; // Store from vector reg -def : InstRW<[V2Write_2cyc_1L01_1V01], (instrs STR_ZXI)>; +def : InstRW<[V2Write_2c_1L01_1V01], (instrs STR_ZXI)>; // Contiguous store, scalar + imm -def : InstRW<[V2Write_2cyc_1L01_1V01], (instregex "^ST1[BHWD]_IMM$", - "^ST1B_[HSD]_IMM$", - "^ST1H_[SD]_IMM$", - "^ST1W_D_IMM$")>; +def : InstRW<[V2Write_2c_1L01_1V01], (instregex "^ST1[BHWD]_IMM$", + "^ST1B_[HSD]_IMM$", + "^ST1H_[SD]_IMM$", + "^ST1W_D_IMM$")>; // Contiguous store, scalar + scalar -def : InstRW<[V2Write_2cyc_1L01_1S_1V01], (instregex "^ST1H(_[SD])?$")>; -def : InstRW<[V2Write_2cyc_1L01_1V01], (instregex "^ST1[BWD]$", - "^ST1B_[HSD]$", - "^ST1W_D$")>; +def : InstRW<[V2Write_2c_1L01_1S_1V01], (instregex "^ST1H(_[SD])?$")>; +def : InstRW<[V2Write_2c_1L01_1V01], (instregex "^ST1[BWD]$", + "^ST1B_[HSD]$", + "^ST1W_D$")>; // Contiguous store two structures from two vectors, scalar + imm -def : InstRW<[V2Write_4cyc_1L01_1V01], (instregex "^ST2[BHWD]_IMM$")>; +def : InstRW<[V2Write_4c_1L01_1V01], (instregex "^ST2[BHWD]_IMM$")>; // Contiguous store two structures from two vectors, scalar + scalar -def : InstRW<[V2Write_4cyc_2L01_2S_2V01], (instrs ST2H)>; -def : InstRW<[V2Write_4cyc_2L01_2V01], (instregex "^ST2[BWD]$")>; +def : InstRW<[V2Write_4c_2L01_2S_2V01], (instrs ST2H)>; +def : InstRW<[V2Write_4c_2L01_2V01], (instregex "^ST2[BWD]$")>; // Contiguous store three structures from three vectors, scalar + imm -def : InstRW<[V2Write_7cyc_9L01_9V01], (instregex "^ST3[BHWD]_IMM$")>; +def : InstRW<[V2Write_7c_9L01_9V01], (instregex "^ST3[BHWD]_IMM$")>; // Contiguous store three structures from three vectors, scalar + scalar -def : InstRW<[V2Write_7cyc_9L01_9S_9V01], (instregex "^ST3[BHWD]$")>; +def : InstRW<[V2Write_7c_9L01_9S_9V01], (instregex "^ST3[BHWD]$")>; // Contiguous store four structures from four vectors, scalar + imm -def : InstRW<[V2Write_11cyc_18L01_18V01], (instregex "^ST4[BHWD]_IMM$")>; +def : InstRW<[V2Write_11c_18L01_18V01], (instregex "^ST4[BHWD]_IMM$")>; // Contiguous store four structures from four vectors, scalar + scalar -def : InstRW<[V2Write_11cyc_18L01_18S_18V01], (instregex "^ST4[BHWD]$")>; +def : InstRW<[V2Write_11c_18L01_18S_18V01], (instregex "^ST4[BHWD]$")>; // Non temporal store, scalar + imm -def : InstRW<[V2Write_2cyc_1L01_1V], (instregex "^STNT1[BHWD]_ZRI$")>; +def : InstRW<[V2Write_2c_1L01_1V], (instregex "^STNT1[BHWD]_ZRI$")>; // Non temporal store, scalar + scalar -def : InstRW<[V2Write_2cyc_1L01_1S_1V], (instrs STNT1H_ZRR)>; -def : InstRW<[V2Write_2cyc_1L01_1V], (instregex "^STNT1[BWD]_ZRR$")>; +def : InstRW<[V2Write_2c_1L01_1S_1V], (instrs STNT1H_ZRR)>; +def : InstRW<[V2Write_2c_1L01_1V], (instregex "^STNT1[BWD]_ZRR$")>; // Scatter non temporal store, vector + scalar 32-bit element size -def : InstRW<[V2Write_4cyc_4L01_4V01], (instregex "^STNT1[BHW]_ZZR_S")>; +def : InstRW<[V2Write_4c_4L01_4V01], (instregex "^STNT1[BHW]_ZZR_S")>; // Scatter non temporal store, vector + scalar 64-bit element size -def : InstRW<[V2Write_2cyc_2L01_2V01], (instregex "^STNT1[BHWD]_ZZR_D")>; +def : InstRW<[V2Write_2c_2L01_2V01], (instregex "^STNT1[BHWD]_ZZR_D")>; // Scatter store 
vector + imm 32-bit element size -def : InstRW<[V2Write_4cyc_4L01_4V01], (instregex "^SST1[BH]_S_IMM$", - "^SST1W_IMM$")>; +def : InstRW<[V2Write_4c_4L01_4V01], (instregex "^SST1[BH]_S_IMM$", + "^SST1W_IMM$")>; // Scatter store vector + imm 64-bit element size -def : InstRW<[V2Write_2cyc_2L01_2V01], (instregex "^SST1[BHW]_D_IMM$", - "^SST1D_IMM$")>; +def : InstRW<[V2Write_2c_2L01_2V01], (instregex "^SST1[BHW]_D_IMM$", + "^SST1D_IMM$")>; // Scatter store, 32-bit scaled offset -def : InstRW<[V2Write_4cyc_4L01_4V01], +def : InstRW<[V2Write_4c_4L01_4V01], (instregex "^SST1(H_S|W)_[SU]XTW_SCALED$")>; // Scatter store, 32-bit unpacked unscaled offset -def : InstRW<[V2Write_2cyc_2L01_2V01], (instregex "^SST1[BHW]_D_[SU]XTW$", - "^SST1D_[SU]XTW$")>; +def : InstRW<[V2Write_2c_2L01_2V01], (instregex "^SST1[BHW]_D_[SU]XTW$", + "^SST1D_[SU]XTW$")>; // Scatter store, 32-bit unpacked scaled offset -def : InstRW<[V2Write_2cyc_2L01_2V01], (instregex "^SST1[HW]_D_[SU]XTW_SCALED$", - "^SST1D_[SU]XTW_SCALED$")>; +def : InstRW<[V2Write_2c_2L01_2V01], (instregex "^SST1[HW]_D_[SU]XTW_SCALED$", + "^SST1D_[SU]XTW_SCALED$")>; // Scatter store, 32-bit unscaled offset -def : InstRW<[V2Write_4cyc_4L01_4V01], (instregex "^SST1[BH]_S_[SU]XTW$", - "^SST1W_[SU]XTW$")>; +def : InstRW<[V2Write_4c_4L01_4V01], (instregex "^SST1[BH]_S_[SU]XTW$", + "^SST1W_[SU]XTW$")>; // Scatter store, 64-bit scaled offset -def : InstRW<[V2Write_2cyc_2L01_2V01], (instregex "^SST1[HW]_D_SCALED$", - "^SST1D_SCALED$")>; +def : InstRW<[V2Write_2c_2L01_2V01], (instregex "^SST1[HW]_D_SCALED$", + "^SST1D_SCALED$")>; // Scatter store, 64-bit unscaled offset -def : InstRW<[V2Write_2cyc_2L01_2V01], (instregex "^SST1[BHW]_D$", - "^SST1D$")>; +def : InstRW<[V2Write_2c_2L01_2V01], (instregex "^SST1[BHW]_D$", + "^SST1D$")>; // SVE Miscellaneous instructions // ----------------------------------------------------------------------------- // Read first fault register, unpredicated -def : InstRW<[V2Write_2cyc_1M0], (instrs RDFFR_P)>; +def : InstRW<[V2Write_2c_1M0], (instrs RDFFR_P)>; // Read first fault register, predicated -def : InstRW<[V2Write_3or4cyc_1M0_1M], (instrs RDFFR_PPz)>; +def : InstRW<[V2Write_3or4c_1M0_1M], (instrs RDFFR_PPz)>; // Read first fault register and set flags -def : InstRW<[V2Write_4or5cyc_2M0_2M], (instrs RDFFRS_PPz)>; +def : InstRW<[V2Write_4or5c_2M0_2M], (instrs RDFFRS_PPz)>; // Set first fault register // Write to first fault register -def : InstRW<[V2Write_2cyc_1M0], (instrs SETFFR, WRFFR)>; +def : InstRW<[V2Write_2c_1M0], (instrs SETFFR, WRFFR)>; // Prefetch // NOTE: This is not specified in the SOG. 
-def : InstRW<[V2Write_4cyc_1L], (instregex "^PRF[BHWD]")>; +def : InstRW<[V2Write_4c_1L], (instregex "^PRF[BHWD]")>; // SVE Cryptographic instructions // ----------------------------------------------------------------------------- // Crypto AES ops -def : InstRW<[V2Write_2cyc_1V], (instregex "^AES[DE]_ZZZ_B$", - "^AESI?MC_ZZ_B$")>; +def : InstRW<[V2Write_2c_1V], (instregex "^AES[DE]_ZZZ_B$", + "^AESI?MC_ZZ_B$")>; // Crypto SHA3 ops -def : InstRW<[V2Write_2cyc_1V0], (instregex "^(BCAX|EOR3)_ZZZZ$", - "^RAX1_ZZZ_D$", - "^XAR_ZZZI_[BHSD]$")>; +def : InstRW<[V2Write_2c_1V0], (instregex "^(BCAX|EOR3)_ZZZZ$", + "^RAX1_ZZZ_D$", + "^XAR_ZZZI_[BHSD]$")>; // Crypto SM4 ops -def : InstRW<[V2Write_4cyc_1V0], (instregex "^SM4E(KEY)?_ZZZ_S$")>; +def : InstRW<[V2Write_4c_1V0], (instregex "^SM4E(KEY)?_ZZZ_S$")>; } diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index 62e22c1..4a861f0c 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -10262,6 +10262,7 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { LoadSDNode *Load = cast<LoadSDNode>(Op); ISD::LoadExtType ExtType = Load->getExtensionType(); EVT MemVT = Load->getMemoryVT(); + MachineMemOperand *MMO = Load->getMemOperand(); if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) { if (MemVT == MVT::i16 && isTypeLegal(MVT::i16)) @@ -10272,7 +10273,6 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { SDValue Chain = Load->getChain(); SDValue BasePtr = Load->getBasePtr(); - MachineMemOperand *MMO = Load->getMemOperand(); EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16; @@ -10329,24 +10329,11 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { unsigned NumElements = MemVT.getVectorNumElements(); if (AS == AMDGPUAS::CONSTANT_ADDRESS || - AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) { - if (!Op->isDivergent() && Alignment >= Align(4) && NumElements < 32) { - if (MemVT.isPow2VectorType() || - (Subtarget->hasScalarDwordx3Loads() && NumElements == 3)) - return SDValue(); - return WidenOrSplitVectorLoad(Op, DAG); - } - // Non-uniform loads will be selected to MUBUF instructions, so they - // have the same legalization requirements as global and private - // loads. 
- // - } - - if (AS == AMDGPUAS::CONSTANT_ADDRESS || AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT || - AS == AMDGPUAS::GLOBAL_ADDRESS) { - if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() && - Load->isSimple() && isMemOpHasNoClobberedMemOperand(Load) && + (AS == AMDGPUAS::GLOBAL_ADDRESS && + Subtarget->getScalarizeGlobalBehavior() && Load->isSimple() && + isMemOpHasNoClobberedMemOperand(Load))) { + if ((!Op->isDivergent() || AMDGPUInstrInfo::isUniformMMO(MMO)) && Alignment >= Align(4) && NumElements < 32) { if (MemVT.isPow2VectorType() || (Subtarget->hasScalarDwordx3Loads() && NumElements == 3)) diff --git a/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp b/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp index 72fa989..b468007 100644 --- a/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp +++ b/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp @@ -50,6 +50,7 @@ static bool isIntrinsicExpansion(Function &F) { case Intrinsic::dx_sdot: case Intrinsic::dx_udot: case Intrinsic::dx_sign: + case Intrinsic::dx_step: return true; } return false; @@ -322,6 +323,28 @@ static Value *expandPowIntrinsic(CallInst *Orig) { return Exp2Call; } +static Value *expandStepIntrinsic(CallInst *Orig) { + + Value *X = Orig->getOperand(0); + Value *Y = Orig->getOperand(1); + Type *Ty = X->getType(); + IRBuilder<> Builder(Orig); + + Constant *One = ConstantFP::get(Ty->getScalarType(), 1.0); + Constant *Zero = ConstantFP::get(Ty->getScalarType(), 0.0); + Value *Cond = Builder.CreateFCmpOLT(Y, X); + + if (Ty != Ty->getScalarType()) { + auto *XVec = dyn_cast<FixedVectorType>(Ty); + One = ConstantVector::getSplat( + ElementCount::getFixed(XVec->getNumElements()), One); + Zero = ConstantVector::getSplat( + ElementCount::getFixed(XVec->getNumElements()), Zero); + } + + return Builder.CreateSelect(Cond, Zero, One); +} + static Intrinsic::ID getMaxForClamp(Type *ElemTy, Intrinsic::ID ClampIntrinsic) { if (ClampIntrinsic == Intrinsic::dx_uclamp) @@ -433,8 +456,9 @@ static bool expandIntrinsic(Function &F, CallInst *Orig) { case Intrinsic::dx_sign: Result = expandSignIntrinsic(Orig); break; + case Intrinsic::dx_step: + Result = expandStepIntrinsic(Orig); } - if (Result) { Orig->replaceAllUsesWith(Result); Orig->eraseFromParent(); diff --git a/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.cpp b/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.cpp index 8bb9497..d643017 100644 --- a/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.cpp +++ b/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.cpp @@ -40,6 +40,9 @@ void LoongArchAsmPrinter::emitInstruction(const MachineInstr *MI) { } switch (MI->getOpcode()) { + case TargetOpcode::STATEPOINT: + LowerSTATEPOINT(*MI); + return; case TargetOpcode::PATCHABLE_FUNCTION_ENTER: LowerPATCHABLE_FUNCTION_ENTER(*MI); return; @@ -146,6 +149,46 @@ bool LoongArchAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, return false; } +void LoongArchAsmPrinter::LowerSTATEPOINT(const MachineInstr &MI) { + StatepointOpers SOpers(&MI); + if (unsigned PatchBytes = SOpers.getNumPatchBytes()) { + assert(PatchBytes % 4 == 0 && "Invalid number of NOP bytes requested!"); + emitNops(PatchBytes / 4); + } else { + // Lower call target and choose correct opcode. 
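The `dx.step` expansion above implements HLSL step(x, y): it emits an `fcmp olt` on (Y, X) and selects 0.0 where y < x and 1.0 otherwise, splatting the two constants for vector types. A minimal scalar reference in C++, as a sketch of the intended semantics (the function name is illustrative, not part of the patch):

    #include <cassert>

    // Reference for HLSL step(x, y): 0.0 when y < x, else 1.0 -- the same
    // select(fcmp olt(Y, X), Zero, One) shape the expansion builds in IR.
    static float step_ref(float x, float y) { return (y < x) ? 0.0f : 1.0f; }

    int main() {
      assert(step_ref(0.5f, 1.0f) == 1.0f); // y >= x
      assert(step_ref(1.0f, 0.5f) == 0.0f); // y < x
      assert(step_ref(1.0f, 1.0f) == 1.0f); // equality is not less-than
      return 0;
    }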
+ const MachineOperand &CallTarget = SOpers.getCallTarget(); + MCOperand CallTargetMCOp; + switch (CallTarget.getType()) { + case MachineOperand::MO_GlobalAddress: + case MachineOperand::MO_ExternalSymbol: + lowerOperand(CallTarget, CallTargetMCOp); + EmitToStreamer(*OutStreamer, + MCInstBuilder(LoongArch::BL).addOperand(CallTargetMCOp)); + break; + case MachineOperand::MO_Immediate: + CallTargetMCOp = MCOperand::createImm(CallTarget.getImm()); + EmitToStreamer(*OutStreamer, + MCInstBuilder(LoongArch::BL).addOperand(CallTargetMCOp)); + break; + case MachineOperand::MO_Register: + CallTargetMCOp = MCOperand::createReg(CallTarget.getReg()); + EmitToStreamer(*OutStreamer, MCInstBuilder(LoongArch::JIRL) + .addReg(LoongArch::R1) + .addOperand(CallTargetMCOp) + .addImm(0)); + break; + default: + llvm_unreachable("Unsupported operand type in statepoint call target"); + break; + } + } + + auto &Ctx = OutStreamer->getContext(); + MCSymbol *MILabel = Ctx.createTempSymbol(); + OutStreamer->emitLabel(MILabel); + SM.recordStatepoint(*MILabel, MI); +} + void LoongArchAsmPrinter::LowerPATCHABLE_FUNCTION_ENTER( const MachineInstr &MI) { const Function &F = MF->getFunction(); diff --git a/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.h b/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.h index 9da9088..fc12f10 100644 --- a/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.h +++ b/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.h @@ -15,6 +15,7 @@ #include "LoongArchSubtarget.h" #include "llvm/CodeGen/AsmPrinter.h" +#include "llvm/CodeGen/StackMaps.h" #include "llvm/MC/MCStreamer.h" #include "llvm/Support/Compiler.h" @@ -41,6 +42,7 @@ public: bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo, const char *ExtraCode, raw_ostream &OS) override; + void LowerSTATEPOINT(const MachineInstr &MI); void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI); void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI); void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI); diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp index cfd0ecb..bfafb33 100644 --- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp +++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp @@ -4588,6 +4588,20 @@ MachineBasicBlock *LoongArchTargetLowering::EmitInstrWithCustomInserter( return emitPseudoXVINSGR2VR(MI, BB, Subtarget); case LoongArch::PseudoCTPOP: return emitPseudoCTPOP(MI, BB, Subtarget); + case TargetOpcode::STATEPOINT: + // STATEPOINT is a pseudo instruction which has no implicit defs/uses + // while the bl call instruction (where the statepoint will be lowered + // in the end) has an implicit def. This def is early-clobber as it is set + // at the moment of the call, earlier than any use is read. + // Add this implicit dead def here as a workaround.
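The statepoint lowering above emits either `NumPatchBytes / 4` NOPs or a single call instruction, and the `getInstSizeInBytes` change further below applies the same rule when measuring the pseudo. A plain-C++ sketch of that sizing rule (the helper name is hypothetical, not in the patch):

    #include <cassert>

    // A statepoint occupies its requested patch bytes, or one 4-byte
    // instruction (the `bl`) when no patch bytes were requested. LoongArch
    // instructions are fixed at 4 bytes, hence the % 4 assertion.
    static unsigned statepointSizeInBytes(unsigned numPatchBytes) {
      assert(numPatchBytes % 4 == 0 && "patch bytes must be a NOP multiple");
      return numPatchBytes == 0 ? 4 : numPatchBytes;
    }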
+ MI.addOperand(*MI.getMF(), + MachineOperand::CreateReg( + LoongArch::R1, /*isDef*/ true, + /*isImp*/ true, /*isKill*/ false, /*isDead*/ true, + /*isUndef*/ false, /*isEarlyClobber*/ true)); + if (!Subtarget.is64Bit()) + report_fatal_error("STATEPOINT is only supported on 64-bit targets"); + return emitPatchPoint(MI, BB); } } diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp index d1af651..a01f2ed 100644 --- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp +++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp @@ -17,6 +17,7 @@ #include "MCTargetDesc/LoongArchMCTargetDesc.h" #include "MCTargetDesc/LoongArchMatInt.h" #include "llvm/CodeGen/RegisterScavenging.h" +#include "llvm/CodeGen/StackMaps.h" #include "llvm/MC/MCInstBuilder.h" using namespace llvm; @@ -236,7 +237,25 @@ unsigned LoongArchInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo(); return getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI); } - return MI.getDesc().getSize(); + + unsigned NumBytes = 0; + const MCInstrDesc &Desc = MI.getDesc(); + + // Size should preferably be set in + // llvm/lib/Target/LoongArch/LoongArch*InstrInfo.td (default case). + // Specific cases handle instructions of variable sizes. + switch (Desc.getOpcode()) { + default: + return Desc.getSize(); + case TargetOpcode::STATEPOINT: + NumBytes = StatepointOpers(&MI).getNumPatchBytes(); + assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!"); + // No patch bytes means a normal call inst (i.e. `bl`) is emitted. + if (NumBytes == 0) + NumBytes = 4; + break; + } + return NumBytes; } bool LoongArchInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const { diff --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp index 7d8278c..19a5dcc 100644 --- a/llvm/lib/Target/Mips/MipsFastISel.cpp +++ b/llvm/lib/Target/Mips/MipsFastISel.cpp @@ -881,38 +881,52 @@ bool MipsFastISel::selectLogicalOp(const Instruction *I) { } bool MipsFastISel::selectLoad(const Instruction *I) { + const LoadInst *LI = cast<LoadInst>(I); + // Atomic loads need special handling. - if (cast<LoadInst>(I)->isAtomic()) + if (LI->isAtomic()) return false; // Verify we have a legal type before going any further. MVT VT; - if (!isLoadTypeLegal(I->getType(), VT)) + if (!isLoadTypeLegal(LI->getType(), VT)) + return false; + + // Underaligned loads need special handling. + if (LI->getAlign() < VT.getFixedSizeInBits() / 8 && + !Subtarget->systemSupportsUnalignedAccess()) return false; // See if we can handle this address. Address Addr; - if (!computeAddress(I->getOperand(0), Addr)) + if (!computeAddress(LI->getOperand(0), Addr)) return false; unsigned ResultReg; if (!emitLoad(VT, ResultReg, Addr)) return false; - updateValueMap(I, ResultReg); + updateValueMap(LI, ResultReg); return true; } bool MipsFastISel::selectStore(const Instruction *I) { - Value *Op0 = I->getOperand(0); + const StoreInst *SI = cast<StoreInst>(I); + + Value *Op0 = SI->getOperand(0); unsigned SrcReg = 0; // Atomic stores need special handling. - if (cast<StoreInst>(I)->isAtomic()) + if (SI->isAtomic()) return false; // Verify we have a legal type before going any further. MVT VT; - if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT)) + if (!isLoadTypeLegal(SI->getOperand(0)->getType(), VT)) + return false; + + // Underaligned stores need special handling.
+ if (SI->getAlign() < VT.getFixedSizeInBits() / 8 && + !Subtarget->systemSupportsUnalignedAccess()) return false; // Get the value to be stored into a register. @@ -922,7 +936,7 @@ bool MipsFastISel::selectStore(const Instruction *I) { // See if we can handle this address. Address Addr; - if (!computeAddress(I->getOperand(1), Addr)) + if (!computeAddress(SI->getOperand(1), Addr)) return false; if (!emitStore(VT, SrcReg, Addr)) diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp index 59f78a8..4345b8e 100644 --- a/llvm/lib/Target/Mips/MipsISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp @@ -880,30 +880,73 @@ static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG, static SDValue performORCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const MipsSubtarget &Subtarget) { - // Pattern match INS. - // $dst = or (and $src1 , mask0), (and (shl $src, pos), mask1), - // where mask1 = (2**size - 1) << pos, mask0 = ~mask1 - // => ins $dst, $src, size, pos, $src1 if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert()) return SDValue(); - SDValue And0 = N->getOperand(0), And1 = N->getOperand(1); + SDValue FirstOperand = N->getOperand(0), SecondOperand = N->getOperand(1); unsigned SMPos0, SMSize0, SMPos1, SMSize1; ConstantSDNode *CN, *CN1; + if ((FirstOperand.getOpcode() == ISD::AND && + SecondOperand.getOpcode() == ISD::SHL) || + (FirstOperand.getOpcode() == ISD::SHL && + SecondOperand.getOpcode() == ISD::AND)) { + // Pattern match INS. + // $dst = or (and $src1, (2**size0 - 1)), (shl $src2, size0) + // ==> ins $src1, $src2, pos, size, pos = size0, size = 32 - pos; + // Or: + // $dst = or (shl $src2, size0), (and $src1, (2**size0 - 1)) + // ==> ins $src1, $src2, pos, size, pos = size0, size = 32 - pos; + SDValue AndOperand0 = FirstOperand.getOpcode() == ISD::AND + ? FirstOperand.getOperand(0) + : SecondOperand.getOperand(0); + SDValue ShlOperand0 = FirstOperand.getOpcode() == ISD::AND + ? SecondOperand.getOperand(0) + : FirstOperand.getOperand(0); + SDValue AndMask = FirstOperand.getOpcode() == ISD::AND + ? FirstOperand.getOperand(1) + : SecondOperand.getOperand(1); + if (!(CN = dyn_cast<ConstantSDNode>(AndMask)) || + !isShiftedMask_64(CN->getZExtValue(), SMPos0, SMSize0)) + return SDValue(); + + SDValue ShlShift = FirstOperand.getOpcode() == ISD::AND + ? SecondOperand.getOperand(1) + : FirstOperand.getOperand(1); + if (!(CN = dyn_cast<ConstantSDNode>(ShlShift))) + return SDValue(); + uint64_t ShlShiftValue = CN->getZExtValue(); + + if (SMPos0 != 0 || SMSize0 != ShlShiftValue) + return SDValue(); + + SDLoc DL(N); + EVT ValTy = N->getValueType(0); + SMPos1 = ShlShiftValue; + assert(SMPos1 < ValTy.getSizeInBits()); + SMSize1 = (ValTy == MVT::i64 ? 64 : 32) - SMPos1; + return DAG.getNode(MipsISD::Ins, DL, ValTy, ShlOperand0, + DAG.getConstant(SMPos1, DL, MVT::i32), + DAG.getConstant(SMSize1, DL, MVT::i32), AndOperand0); + } + // See if Op's first operand matches (and $src1 , mask0). - if (And0.getOpcode() != ISD::AND) + if (FirstOperand.getOpcode() != ISD::AND) return SDValue(); - if (!(CN = dyn_cast<ConstantSDNode>(And0.getOperand(1))) || + // Pattern match INS. 
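The two Mips FastISel bail-outs added above (one for loads, one for stores) reject accesses whose known alignment is below the type's natural size when the subtarget cannot perform unaligned accesses, leaving them to SelectionDAG. A sketch of the predicate, assuming natural alignment equals the store size in bytes (names are illustrative):

    // True when FastISel should give up on the memory access: the known
    // alignment is smaller than the type's size in bytes and the target has
    // no hardware support for unaligned loads/stores.
    static bool needsUnalignedFallback(unsigned alignBytes, unsigned sizeBits,
                                       bool systemSupportsUnaligned) {
      return alignBytes < sizeBits / 8 && !systemSupportsUnaligned;
    }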
+ // $dst = or (and $src1 , mask0), (and (shl $src, pos), mask1), + // where mask1 = (2**size - 1) << pos, mask0 = ~mask1 + // => ins $dst, $src, size, pos, $src1 + if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) || !isShiftedMask_64(~CN->getSExtValue(), SMPos0, SMSize0)) return SDValue(); // See if Op's second operand matches (and (shl $src, pos), mask1). - if (And1.getOpcode() == ISD::AND && - And1.getOperand(0).getOpcode() == ISD::SHL) { + if (SecondOperand.getOpcode() == ISD::AND && + SecondOperand.getOperand(0).getOpcode() == ISD::SHL) { - if (!(CN = dyn_cast<ConstantSDNode>(And1.getOperand(1))) || + if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand.getOperand(1))) || !isShiftedMask_64(CN->getZExtValue(), SMPos1, SMSize1)) return SDValue(); @@ -911,7 +954,7 @@ static SDValue performORCombine(SDNode *N, SelectionDAG &DAG, if (SMPos0 != SMPos1 || SMSize0 != SMSize1) return SDValue(); - SDValue Shl = And1.getOperand(0); + SDValue Shl = SecondOperand.getOperand(0); if (!(CN = dyn_cast<ConstantSDNode>(Shl.getOperand(1)))) return SDValue(); @@ -928,7 +971,7 @@ static SDValue performORCombine(SDNode *N, SelectionDAG &DAG, return DAG.getNode(MipsISD::Ins, DL, ValTy, Shl.getOperand(0), DAG.getConstant(SMPos0, DL, MVT::i32), DAG.getConstant(SMSize0, DL, MVT::i32), - And0.getOperand(0)); + FirstOperand.getOperand(0)); } else { // Pattern match DINS. // $dst = or (and $src, mask0), mask1 @@ -938,9 +981,9 @@ static SDValue performORCombine(SDNode *N, SelectionDAG &DAG, ((SMSize0 + SMPos0 <= 64 && Subtarget.hasMips64r2()) || (SMSize0 + SMPos0 <= 32))) { // Check if AND instruction has constant as argument - bool isConstCase = And1.getOpcode() != ISD::AND; - if (And1.getOpcode() == ISD::AND) { - if (!(CN1 = dyn_cast<ConstantSDNode>(And1->getOperand(1)))) + bool isConstCase = SecondOperand.getOpcode() != ISD::AND; + if (SecondOperand.getOpcode() == ISD::AND) { + if (!(CN1 = dyn_cast<ConstantSDNode>(SecondOperand->getOperand(1)))) return SDValue(); } else { if (!(CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1)))) @@ -957,7 +1000,8 @@ static SDValue performORCombine(SDNode *N, SelectionDAG &DAG, SDValue SrlX; if (!isConstCase) { Const1 = DAG.getConstant(SMPos0, DL, MVT::i32); - SrlX = DAG.getNode(ISD::SRL, DL, And1->getValueType(0), And1, Const1); + SrlX = DAG.getNode(ISD::SRL, DL, SecondOperand->getValueType(0), + SecondOperand, Const1); } return DAG.getNode( MipsISD::Ins, DL, N->getValueType(0), @@ -968,8 +1012,7 @@ static SDValue performORCombine(SDNode *N, SelectionDAG &DAG, DAG.getConstant(ValTy.getSizeInBits() / 8 < 8 ? SMSize0 & 31 : SMSize0, DL, MVT::i32), - And0->getOperand(0)); - + FirstOperand->getOperand(0)); } return SDValue(); } diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp index 5c5766a..bb4fc80 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -838,8 +838,8 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM, setOperationAction(ISD::FCOPYSIGN, MVT::v2f16, Expand); setOperationAction(ISD::FCOPYSIGN, MVT::bf16, Expand); setOperationAction(ISD::FCOPYSIGN, MVT::v2bf16, Expand); - setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); - setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); + setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); + setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); // These map to corresponding instructions for f32/f64. f16 must be // promoted to f32. 
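The new Mips combine above recognizes `or(and($src1, 2**size0 - 1), shl($src2, size0))` in either operand order and folds it to a single `ins` with pos = size0 and size = XLen - pos. A worked instance in C++ for the 32-bit case with size0 == 8 (function names are illustrative):

    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    // The or/and/shl shape the combine matches, with size0 == 8.
    static uint32_t pattern(uint32_t src1, uint32_t src2) {
      return (src1 & 0xFFu) | (src2 << 8);
    }

    // What `ins dst, src2, pos=8, size=24` computes: the low 24 bits of src2
    // inserted into src1 starting at bit 8.
    static uint32_t ins_ref(uint32_t src1, uint32_t src2) {
      const uint32_t mask = 0xFFFFFF00u; // bits [31:8]
      return (src1 & ~mask) | ((src2 << 8) & mask);
    }

    int main() {
      for (uint32_t a : {0x12345678u, 0xFFFFFFFFu})
        for (uint32_t b : {0x0u, 0xDEADBEEFu})
          assert(pattern(a, b) == ins_ref(a, b));
      return 0;
    }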
v2f16 is expanded to f16, which is then promoted @@ -964,6 +964,7 @@ const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const { MAKE_CASE(NVPTXISD::BFE) MAKE_CASE(NVPTXISD::BFI) MAKE_CASE(NVPTXISD::PRMT) + MAKE_CASE(NVPTXISD::FCOPYSIGN) MAKE_CASE(NVPTXISD::DYNAMIC_STACKALLOC) MAKE_CASE(NVPTXISD::SETP_F16X2) MAKE_CASE(NVPTXISD::SETP_BF16X2) @@ -2560,6 +2561,23 @@ SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op, } } +/// If the types match, convert the generic copysign to the NVPTXISD version, +/// otherwise bail, ensuring that mismatched cases are properly expanded. +SDValue NVPTXTargetLowering::LowerFCOPYSIGN(SDValue Op, + SelectionDAG &DAG) const { + EVT VT = Op.getValueType(); + SDLoc DL(Op); + + SDValue In1 = Op.getOperand(0); + SDValue In2 = Op.getOperand(1); + EVT SrcVT = In2.getValueType(); + + if (!SrcVT.bitsEq(VT)) + return SDValue(); + + return DAG.getNode(NVPTXISD::FCOPYSIGN, DL, VT, In1, In2); +} + SDValue NVPTXTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); @@ -2803,6 +2821,8 @@ NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { return LowerSelect(Op, DAG); case ISD::FROUND: return LowerFROUND(Op, DAG); + case ISD::FCOPYSIGN: + return LowerFCOPYSIGN(Op, DAG); case ISD::SINT_TO_FP: case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h index 32e6b04..70e16ee 100644 --- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h +++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h @@ -61,6 +61,7 @@ enum NodeType : unsigned { BFE, BFI, PRMT, + FCOPYSIGN, DYNAMIC_STACKALLOC, BrxStart, BrxItem, @@ -623,6 +624,8 @@ private: SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFROUND32(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFROUND64(SDValue Op, SelectionDAG &DAG) const; diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td index 0c88309..656fc67 100644 --- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td +++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td @@ -978,6 +978,22 @@ def INT_NVVM_FABS_D : F_MATH_1<"abs.f64 \t$dst, $src0;", Float64Regs, Float64Regs, int_nvvm_fabs_d>; // +// copysign +// + +def fcopysign_nvptx : SDNode<"NVPTXISD::FCOPYSIGN", SDTFPBinOp>; + +def COPYSIGN_F : + NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src0, Float32Regs:$src1), + "copysign.f32 \t$dst, $src0, $src1;", + [(set Float32Regs:$dst, (fcopysign_nvptx Float32Regs:$src1, Float32Regs:$src0))]>; + +def COPYSIGN_D : + NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$src0, Float64Regs:$src1), + "copysign.f64 \t$dst, $src0, $src1;", + [(set Float64Regs:$dst, (fcopysign_nvptx Float64Regs:$src1, Float64Regs:$src0))]>; + +// // Abs, Neg bf16, bf16x2 // diff --git a/llvm/lib/Target/RISCV/RISCVCallingConv.cpp b/llvm/lib/Target/RISCV/RISCVCallingConv.cpp index deba859..fc276d1 100644 --- a/llvm/lib/Target/RISCV/RISCVCallingConv.cpp +++ b/llvm/lib/Target/RISCV/RISCVCallingConv.cpp @@ -521,13 +521,6 @@ bool llvm::CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT, const RISCVTargetLowering &TLI = *Subtarget.getTargetLowering(); RISCVABI::ABI ABI = Subtarget.getTargetABI(); - if (LocVT == MVT::i32 || LocVT == MVT::i64) { - if (MCRegister Reg =
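The NVPTX change above selects a single PTX copysign instruction only when both FCOPYSIGN operands have the same width (`SrcVT.bitsEq(VT)`); mismatched widths return an empty SDValue so the generic expansion handles them. A bit-level f32 reference of the operation, as a sketch (the helper name is illustrative):

    #include <cstdint>
    #include <cstring>

    // copysign(x, y): magnitude of x, sign of y. With same-width operands this
    // is one AND/OR over the payload bits and the sign bit, which is why the
    // type check above makes the single-instruction lowering safe.
    static float copysign_ref(float x, float y) {
      uint32_t xb, yb;
      std::memcpy(&xb, &x, sizeof xb);
      std::memcpy(&yb, &y, sizeof yb);
      const uint32_t rb = (xb & 0x7FFFFFFFu) | (yb & 0x80000000u);
      float r;
      std::memcpy(&r, &rb, sizeof r);
      return r;
    }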
State.AllocateReg(getFastCCArgGPRs(ABI))) { - State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); - return false; - } - } - if ((LocVT == MVT::f16 && Subtarget.hasStdExtZfhmin()) || (LocVT == MVT::bf16 && Subtarget.hasStdExtZfbfmin())) { static const MCPhysReg FPR16List[] = { @@ -565,6 +558,8 @@ bool llvm::CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT, } } + MVT XLenVT = Subtarget.getXLenVT(); + // Check if there is an available GPR before hitting the stack. if ((LocVT == MVT::f16 && Subtarget.hasStdExtZhinxmin()) || (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) || @@ -572,7 +567,7 @@ bool llvm::CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT, Subtarget.hasStdExtZdinx())) { if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) { if (LocVT.getSizeInBits() != Subtarget.getXLen()) { - LocVT = Subtarget.getXLenVT(); + LocVT = XLenVT; State.addLoc( CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo)); return false; @@ -582,58 +577,39 @@ bool llvm::CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT, } } - if (LocVT == MVT::f16 || LocVT == MVT::bf16) { - int64_t Offset2 = State.AllocateStack(2, Align(2)); - State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset2, LocVT, LocInfo)); - return false; - } - - if (LocVT == MVT::i32 || LocVT == MVT::f32) { - int64_t Offset4 = State.AllocateStack(4, Align(4)); - State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo)); - return false; - } - - if (LocVT == MVT::i64 || LocVT == MVT::f64) { - int64_t Offset5 = State.AllocateStack(8, Align(8)); - State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo)); - return false; - } + ArrayRef<MCPhysReg> ArgGPRs = getFastCCArgGPRs(ABI); if (LocVT.isVector()) { if (MCRegister Reg = allocateRVVReg(ValVT, ValNo, State, TLI)) { // Fixed-length vectors are located in the corresponding scalable-vector // container types. - if (ValVT.isFixedLengthVector()) + if (LocVT.isFixedLengthVector()) LocVT = TLI.getContainerForFixedLengthVector(LocVT); State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); return false; } - // Try and pass the address via a "fast" GPR. - if (MCRegister GPRReg = State.AllocateReg(getFastCCArgGPRs(ABI))) { + // Pass scalable vectors indirectly. Pass fixed vectors indirectly if we + // have a free GPR. + if (LocVT.isScalableVector() || + State.getFirstUnallocated(ArgGPRs) != ArgGPRs.size()) { LocInfo = CCValAssign::Indirect; - LocVT = Subtarget.getXLenVT(); - State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo)); - return false; + LocVT = XLenVT; } + } - // Pass scalable vectors indirectly by storing the pointer on the stack. - if (ValVT.isScalableVector()) { - LocInfo = CCValAssign::Indirect; - LocVT = Subtarget.getXLenVT(); - unsigned XLen = Subtarget.getXLen(); - int64_t StackOffset = State.AllocateStack(XLen / 8, Align(XLen / 8)); - State.addLoc( - CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); + if (LocVT == XLenVT) { + if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) { + State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); return false; } + } - // Pass fixed-length vectors on the stack. 
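To summarize the CC_RISCV_FastCC restructuring in this hunk: XLenVT scalars try the fast GPR list first, and anything still unassigned (XLenVT, f16/bf16, f32, f64, fixed-length vectors) now falls through to a single stack path that allocates the store size at the type's natural scalar alignment. A rough standalone model of that allocation step; allocateStack and NextOffset are invented stand-ins for CCState's internal bookkeeping, not code from the patch:

// Sketch of the unified stack fallback: round the running offset up to the
// natural scalar alignment (at least 1 byte), then reserve the store size.
// Offsets are assumed nonnegative.
#include <algorithm>
#include <cstdint>

int64_t allocateStack(int64_t &NextOffset, uint64_t StoreSize,
                      uint64_t ScalarSizeInBits) {
  uint64_t Align = std::max<uint64_t>(ScalarSizeInBits / 8, 1); // valueOrOne()
  NextOffset = (NextOffset + Align - 1) / Align * Align;        // round up
  int64_t Offset = NextOffset;                                  // arg lives here
  NextOffset += StoreSize;
  return Offset;
}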
- auto StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne(); - int64_t StackOffset = State.AllocateStack(ValVT.getStoreSize(), StackAlign); - State.addLoc( - CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); + if (LocVT == XLenVT || LocVT == MVT::f16 || LocVT == MVT::bf16 || + LocVT == MVT::f32 || LocVT == MVT::f64 || LocVT.isFixedLengthVector()) { + Align StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne(); + int64_t Offset = State.AllocateStack(LocVT.getStoreSize(), StackAlign); + State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo)); return false; } diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index ab52f97..ccc74ca 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -1078,7 +1078,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, ISD::VP_SINT_TO_FP, ISD::VP_UINT_TO_FP}, VT, Custom); setOperationAction({ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, - ISD::EXTRACT_SUBVECTOR}, + ISD::EXTRACT_SUBVECTOR, ISD::VECTOR_INTERLEAVE, + ISD::VECTOR_DEINTERLEAVE}, VT, Custom); if (Subtarget.hasStdExtZfhmin()) setOperationAction(ISD::SPLAT_VECTOR, VT, Custom); @@ -1117,7 +1118,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, Custom); setOperationAction(ISD::SELECT_CC, VT, Expand); setOperationAction({ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, - ISD::EXTRACT_SUBVECTOR}, + ISD::EXTRACT_SUBVECTOR, ISD::VECTOR_INTERLEAVE, + ISD::VECTOR_DEINTERLEAVE}, VT, Custom); if (Subtarget.hasStdExtZfbfmin()) setOperationAction(ISD::SPLAT_VECTOR, VT, Custom); diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp index 831d7f7..b526c9f 100644 --- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp @@ -263,6 +263,9 @@ private: bool selectSpvThreadId(Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const; + bool selectStep(Register ResVReg, const SPIRVType *ResType, + MachineInstr &I) const; + bool selectUnmergeValues(MachineInstr &I) const; Register buildI32Constant(uint32_t Val, MachineInstr &I, @@ -1710,6 +1713,25 @@ bool SPIRVInstructionSelector::selectSign(Register ResVReg, return Result; } +bool SPIRVInstructionSelector::selectStep(Register ResVReg, + const SPIRVType *ResType, + MachineInstr &I) const { + + assert(I.getNumOperands() == 4); + assert(I.getOperand(2).isReg()); + assert(I.getOperand(3).isReg()); + MachineBasicBlock &BB = *I.getParent(); + + return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst)) + .addDef(ResVReg) + .addUse(GR.getSPIRVTypeID(ResType)) + .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450)) + .addImm(GL::Step) + .addUse(I.getOperand(2).getReg()) + .addUse(I.getOperand(3).getReg()) + .constrainAllUses(TII, TRI, RBI); +} + bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const { @@ -2468,6 +2490,8 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg, .addUse(GR.getSPIRVTypeID(ResType)) .addUse(GR.getOrCreateConstInt(3, I, IntTy, TII)); } + case Intrinsic::spv_step: + return selectStep(ResVReg, ResType, I); default: { std::string DiagMsg; raw_string_ostream OS(DiagMsg); diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index c3b9199..3c5b952 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp 
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -336,6 +336,14 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM, setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom); } } + if (Subtarget.hasAVX10_2()) { + setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i32, Legal); + setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i32, Legal); + if (Subtarget.is64Bit()) { + setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Legal); + setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Legal); + } + } // Handle address space casts between mixed sized pointers. setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom); @@ -34353,6 +34361,16 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { NODE_NAME_CASE(CTEST) NODE_NAME_CASE(CLOAD) NODE_NAME_CASE(CSTORE) + NODE_NAME_CASE(CVTTS2SIS) + NODE_NAME_CASE(CVTTS2UIS) + NODE_NAME_CASE(CVTTS2SIS_SAE) + NODE_NAME_CASE(CVTTS2UIS_SAE) + NODE_NAME_CASE(CVTTP2SIS) + NODE_NAME_CASE(MCVTTP2SIS) + NODE_NAME_CASE(CVTTP2UIS_SAE) + NODE_NAME_CASE(CVTTP2SIS_SAE) + NODE_NAME_CASE(CVTTP2UIS) + NODE_NAME_CASE(MCVTTP2UIS) } return nullptr; #undef NODE_NAME_CASE @@ -37757,7 +37775,9 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op, case X86ISD::VFPROUND: case X86ISD::VMFPROUND: case X86ISD::CVTPS2PH: - case X86ISD::MCVTPS2PH: { + case X86ISD::MCVTPS2PH: + case X86ISD::MCVTTP2SIS: + case X86ISD::MCVTTP2UIS: { // Truncations/Conversions - upper elements are known zero. EVT SrcVT = Op.getOperand(0).getValueType(); if (SrcVT.isVector()) { diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h index 93d2b3e..5fb5886 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.h +++ b/llvm/lib/Target/X86/X86ISelLowering.h @@ -674,6 +674,18 @@ namespace llvm { CVTTP2UI, CVTTP2SI_SAE, CVTTP2UI_SAE, + + // Saturation enabled Vector float/double to signed/unsigned + // integer with truncation. + CVTTP2SIS, + CVTTP2UIS, + CVTTP2SIS_SAE, + CVTTP2UIS_SAE, + // Masked versions of above. Used for v2f64 to v4i32. + // SRC, PASSTHRU, MASK + MCVTTP2SIS, + MCVTTP2UIS, + // Scalar float/double to signed/unsigned integer with truncation. CVTTS2SI, CVTTS2UI, @@ -684,6 +696,12 @@ namespace llvm { CVTSI2P, CVTUI2P, + // Scalar float/double to signed/unsigned integer with saturation. + CVTTS2SIS, + CVTTS2UIS, + CVTTS2SIS_SAE, + CVTTS2UIS_SAE, + // Masked versions of above. Used for v2f64->v4f32. // SRC, PASSTHRU, MASK MCVTP2SI, diff --git a/llvm/lib/Target/X86/X86InstrAVX10.td b/llvm/lib/Target/X86/X86InstrAVX10.td index b0eb210..ada2bba 100644 --- a/llvm/lib/Target/X86/X86InstrAVX10.td +++ b/llvm/lib/Target/X86/X86InstrAVX10.td @@ -626,6 +626,318 @@ defm VCVTTPS2IUBS : avx10_sat_cvt_base<0x6a, "vcvttps2iubs", SchedWriteVecIMul, AVX512PDIi8Base, T_MAP5, EVEX_CD8<32, CD8VF>; //------------------------------------------------- +// AVX10 SATCVT-DS instructions +//------------------------------------------------- + +// Convert Double to Signed/Unsigned Doubleword with truncation. 
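Context for the SATCVT-DS tablegen that follows: these instructions implement LLVM's saturating fp-to-int conversions, which is why FP_TO_SINT_SAT/FP_TO_UINT_SAT are marked Legal above when AVX10.2 is available. A plain C++ reference for the scalar i32 case, written from the documented llvm.fptosi.sat semantics rather than taken from the patch:

// Reference semantics of llvm.fptosi.sat.i32.f64, which vcvttsd2sis can now
// implement directly: truncate toward zero, clamp out-of-range inputs to
// INT32_MIN/INT32_MAX, and map NaN to 0.
#include <cmath>
#include <cstdint>
#include <limits>

int32_t fptosi_sat_i32(double X) {
  if (std::isnan(X))
    return 0;
  if (X <= static_cast<double>(std::numeric_limits<int32_t>::min()))
    return std::numeric_limits<int32_t>::min();
  if (X >= static_cast<double>(std::numeric_limits<int32_t>::max()))
    return std::numeric_limits<int32_t>::max();
  return static_cast<int32_t>(X); // in range: plain truncation
}

Without these instructions the backend has to expand the clamp into a compare-and-select sequence; marking the operation Legal lets it fold into a single conversion.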
+multiclass avx10_cvttpd2dqs<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode, + SDNode MaskOpNode, SDNode OpNodeSAE, + X86SchedWriteWidths sched> { + let Predicates = [HasAVX10_2_512] in { + defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8i32x_info, v8f64_info, OpNode, + MaskOpNode, sched.ZMM>, + avx512_vcvt_fp_sae<opc, OpcodeStr, v8i32x_info, v8f64_info, + OpNodeSAE, sched.ZMM>, EVEX_V512; + } + let Predicates = [HasAVX10_2] in { + defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4i32x_info, v2f64x_info, + null_frag, null_frag, sched.XMM, "{1to2}", "{x}", + f128mem, VK2WM>, EVEX_V128; + defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4i32x_info, v4f64x_info, OpNode, + MaskOpNode, sched.YMM, "{1to4}", "{y}">, EVEX_V256; + } + + let Predicates = [HasAVX10_2], hasEVEX_U=1 in { + defm Z256 : avx512_vcvt_fp_sae<opc, OpcodeStr, v4i32x_info, v4f64x_info, OpNodeSAE, + sched.YMM>, EVEX_V256; + } + + + def : InstAlias<OpcodeStr#"x\t{$src, $dst|$dst, $src}", + (!cast<Instruction>(NAME # "Z128rr") VR128X:$dst, + VR128X:$src), 0, "att">; + def : InstAlias<OpcodeStr#"x\t{$src, $dst {${mask}}|$dst {${mask}}, $src}", + (!cast<Instruction>(NAME # "Z128rrk") VR128X:$dst, + VK2WM:$mask, VR128X:$src), 0, "att">; + def : InstAlias<OpcodeStr#"x\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}", + (!cast<Instruction>(NAME # "Z128rrkz") VR128X:$dst, + VK2WM:$mask, VR128X:$src), 0, "att">; + def : InstAlias<OpcodeStr#"x\t{${src}{1to2}, $dst|$dst, ${src}{1to2}}", + (!cast<Instruction>(NAME # "Z128rmb") VR128X:$dst, + f64mem:$src), 0, "att">; + def : InstAlias<OpcodeStr#"x\t{${src}{1to2}, $dst {${mask}}|" + "$dst {${mask}}, ${src}{1to2}}", + (!cast<Instruction>(NAME # "Z128rmbk") VR128X:$dst, + VK2WM:$mask, f64mem:$src), 0, "att">; + def : InstAlias<OpcodeStr#"x\t{${src}{1to2}, $dst {${mask}} {z}|" + "$dst {${mask}} {z}, ${src}{1to2}}", + (!cast<Instruction>(NAME # "Z128rmbkz") VR128X:$dst, + VK2WM:$mask, f64mem:$src), 0, "att">; + + def : InstAlias<OpcodeStr#"y\t{$src, $dst|$dst, $src}", + (!cast<Instruction>(NAME # "Z256rr") VR128X:$dst, + VR256X:$src), 0, "att">; + def : InstAlias<OpcodeStr#"y\t{{sae} $src, $dst|$dst, $src {sae}}", + (!cast<Instruction>(NAME # "Z256rrb") VR128X:$dst, + VR256X:$src), 0, "att">; + def : InstAlias<OpcodeStr#"y\t{$src, $dst {${mask}}|$dst {${mask}}, $src}", + (!cast<Instruction>(NAME # "Z256rrk") VR128X:$dst, + VK4WM:$mask, VR256X:$src), 0, "att">; + def : InstAlias<OpcodeStr#"y\t{{sae} $src, $dst {${mask}}|$dst {${mask}}, $src {sae}}", + (!cast<Instruction>(NAME # "Z256rrbk") VR128X:$dst, + VK4WM:$mask, VR256X:$src), 0, "att">; + def : InstAlias<OpcodeStr#"y\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}", + (!cast<Instruction>(NAME # "Z256rrkz") VR128X:$dst, + VK4WM:$mask, VR256X:$src), 0, "att">; + def : InstAlias<OpcodeStr#"y\t{{sae} $src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src {sae}}", + (!cast<Instruction>(NAME # "Z256rrbkz") VR128X:$dst, + VK4WM:$mask, VR256X:$src), 0, "att">; + def : InstAlias<OpcodeStr#"y\t{${src}{1to4}, $dst|$dst, ${src}{1to4}}", + (!cast<Instruction>(NAME # "Z256rmb") VR128X:$dst, + f64mem:$src), 0, "att">; + def : InstAlias<OpcodeStr#"y\t{${src}{1to4}, $dst {${mask}}|" + "$dst {${mask}}, ${src}{1to4}}", + (!cast<Instruction>(NAME # "Z256rmbk") VR128X:$dst, + VK4WM:$mask, f64mem:$src), 0, "att">; + def : InstAlias<OpcodeStr#"y\t{${src}{1to4}, $dst {${mask}} {z}|" + "$dst {${mask}} {z}, ${src}{1to4}}", + (!cast<Instruction>(NAME # "Z256rmbkz") VR128X:$dst, + VK4WM:$mask, f64mem:$src), 0, "att">; +} + +// Convert Double to Signed/Unsigned Quadword
with truncation and saturation enabled +multiclass avx10_cvttpd2qqs<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode, + SDNode MaskOpNode, SDNode OpNodeRnd, + X86SchedWriteWidths sched> { + let Predicates = [HasAVX10_2_512] in { + defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8i64_info, v8f64_info, OpNode, + MaskOpNode, sched.ZMM>, + avx512_vcvt_fp_sae<opc, OpcodeStr, v8i64_info, v8f64_info, + OpNodeRnd, sched.ZMM>, EVEX_V512; + } + let Predicates = [HasAVX10_2] in { + defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v2i64x_info, v2f64x_info, OpNode, + MaskOpNode, sched.XMM>, EVEX_V128; + defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4i64x_info, v4f64x_info, OpNode, + MaskOpNode, sched.YMM>, EVEX_V256; + } + let Predicates = [HasAVX10_2], hasEVEX_U=1 in { + defm Z256 : avx512_vcvt_fp_sae<opc, OpcodeStr, v4i64x_info, v4f64x_info, + OpNodeRnd, sched.YMM>, EVEX_V256; + } +} + +// Convert Float to Signed/Unsigned Quadword with truncation +multiclass avx10_cvttps2qqs<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode, + SDNode MaskOpNode, SDNode OpNodeRnd, + X86SchedWriteWidths sched> { + let Predicates = [HasAVX10_2_512] in { + defm Z : avx512_vcvt_fp<opc, OpcodeStr, v8i64_info, v8f32x_info, OpNode, + MaskOpNode, sched.ZMM>, + avx512_vcvt_fp_sae<opc, OpcodeStr, v8i64_info, v8f32x_info, + OpNodeRnd, sched.ZMM>, EVEX_V512; + } + let Predicates = [HasAVX10_2] in { + defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v2i64x_info, v4f32x_info, OpNode, + MaskOpNode, sched.XMM, "{1to2}", "", f64mem, VK2WM, + (v2i64 (OpNode (bc_v4f32 (v2f64 + (scalar_to_vector (loadf64 addr:$src)))))), + (v2i64 (MaskOpNode (bc_v4f32 (v2f64 + (scalar_to_vector (loadf64 addr:$src))))))>, + EVEX_V128; + defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4i64x_info, v4f32x_info, OpNode, + MaskOpNode, sched.YMM>, EVEX_V256; + } + + let Predicates = [HasAVX10_2], hasEVEX_U=1 in { + defm Z256 : avx512_vcvt_fp_sae<opc, OpcodeStr, v4i64x_info, v4f32x_info, OpNodeRnd, + sched.YMM>, EVEX_V256; + } +} + +// Convert Float to Signed/Unsigned Doubleword with truncation +multiclass avx10_cvttps2dqs<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode, + SDNode MaskOpNode, + SDNode OpNodeSAE, X86SchedWriteWidths sched> { + let Predicates = [HasAVX10_2_512] in { + defm Z : avx512_vcvt_fp<opc, OpcodeStr, v16i32_info, v16f32_info, OpNode, + MaskOpNode, sched.ZMM>, + avx512_vcvt_fp_sae<opc, OpcodeStr, v16i32_info, v16f32_info, + OpNodeSAE, sched.ZMM>, EVEX_V512; + } + + let Predicates = [HasAVX10_2] in { + defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v4i32x_info, v4f32x_info, OpNode, + MaskOpNode, sched.XMM>, EVEX_V128; + defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v8i32x_info, v8f32x_info, OpNode, + MaskOpNode, sched.YMM>, EVEX_V256; + } + + let Predicates = [HasAVX10_2], hasEVEX_U=1 in { + defm Z256 : avx512_vcvt_fp_sae<opc, OpcodeStr, v8i32x_info, v8f32x_info, + OpNodeSAE, sched.YMM>, EVEX_V256; + } +} + +defm VCVTTPD2DQS : avx10_cvttpd2dqs<0x6D, "vcvttpd2dqs", X86cvttp2sis, + X86cvttp2sis, X86cvttp2sisSAE, + SchedWriteCvtPD2DQ>, + PD, REX_W, T_MAP5,PS, EVEX_CD8<64, CD8VF>; +defm VCVTTPD2UDQS : avx10_cvttpd2dqs<0x6C, "vcvttpd2udqs", X86cvttp2uis, + X86cvttp2uis, X86cvttp2uisSAE, + SchedWriteCvtPD2DQ>, + REX_W, T_MAP5,PS, EVEX_CD8<64, CD8VF>; +defm VCVTTPS2DQS : avx10_cvttps2dqs<0x6D, "vcvttps2dqs", X86cvttp2sis, + X86cvttp2sis, X86cvttp2sisSAE, + SchedWriteCvtPS2DQ>, T_MAP5,PS, + EVEX_CD8<32, CD8VF>; +defm VCVTTPS2UDQS : avx10_cvttps2dqs<0x6C, "vcvttps2udqs", X86cvttp2uis, + X86cvttp2uis, X86cvttp2uisSAE, + SchedWriteCvtPS2DQ>, T_MAP5,PS, + EVEX_CD8<32,
CD8VF>; +defm VCVTTPD2QQS : avx10_cvttpd2qqs<0x6D, "vcvttpd2qqs", X86cvttp2sis, + X86cvttp2sis, X86cvttp2sisSAE, + SchedWriteCvtPD2DQ>, REX_W, T_MAP5,PD, + EVEX_CD8<64, CD8VF>; +defm VCVTTPS2QQS : avx10_cvttps2qqs<0x6D, "vcvttps2qqs", X86cvttp2sis, + X86cvttp2sis, X86cvttp2sisSAE, + SchedWriteCvtPS2DQ>, T_MAP5,PD, + EVEX_CD8<32, CD8VH>; +defm VCVTTPD2UQQS : avx10_cvttpd2qqs<0x6C, "vcvttpd2uqqs", X86cvttp2uis, + X86cvttp2uis, X86cvttp2uisSAE, + SchedWriteCvtPD2DQ>, REX_W, T_MAP5,PD, + EVEX_CD8<64, CD8VF>; +defm VCVTTPS2UQQS : avx10_cvttps2qqs<0x6C, "vcvttps2uqqs", X86cvttp2uis, + X86cvttp2uis, X86cvttp2uisSAE, + SchedWriteCvtPS2DQ>, T_MAP5,PD, + EVEX_CD8<32, CD8VH>; + +let Predicates = [HasAVX10_2] in { +// Special patterns to allow use of X86mcvttp2si for masking. Instruction +// patterns have been disabled with null_frag. +// Patterns VCVTTPD2DQSZ128 + +def : Pat<(v4i32 (X86cvttp2sis (v2f64 VR128X:$src))), + (VCVTTPD2DQSZ128rr VR128X:$src)>; +def : Pat<(v4i32 (X86cvttp2sis (loadv2f64 addr:$src))), + (VCVTTPD2DQSZ128rm addr:$src)>; +def : Pat<(v4i32 (X86cvttp2sis (v2f64 (X86VBroadcastld64 addr:$src)))), + (VCVTTPD2DQSZ128rmb addr:$src)>; +def : Pat<(X86mcvttp2sis (v2f64 VR128X:$src), (v4i32 VR128X:$src0), + VK2WM:$mask), + (VCVTTPD2DQSZ128rrk VR128X:$src0, VK2WM:$mask, VR128X:$src)>; +def : Pat<(X86mcvttp2sis (v2f64 VR128X:$src), v4i32x_info.ImmAllZerosV, + VK2WM:$mask), + (VCVTTPD2DQSZ128rrkz VK2WM:$mask, VR128X:$src)>; +def : Pat<(X86mcvttp2sis (loadv2f64 addr:$src), (v4i32 VR128X:$src0), + VK2WM:$mask), + (VCVTTPD2DQSZ128rmk VR128X:$src0, VK2WM:$mask, addr:$src)>; +def : Pat<(X86mcvttp2sis (loadv2f64 addr:$src), v4i32x_info.ImmAllZerosV, + VK2WM:$mask), + (VCVTTPD2DQSZ128rmkz VK2WM:$mask, addr:$src)>; +def : Pat<(X86mcvttp2sis (v2f64 (X86VBroadcastld64 addr:$src)), + (v4i32 VR128X:$src0), VK2WM:$mask), + (VCVTTPD2DQSZ128rmbk VR128X:$src0, VK2WM:$mask, addr:$src)>; +def : Pat<(X86mcvttp2sis (v2f64 (X86VBroadcastld64 addr:$src)), + v4i32x_info.ImmAllZerosV, VK2WM:$mask), + (VCVTTPD2DQSZ128rmbkz VK2WM:$mask, addr:$src)>; + +// Patterns VCVTTPD2UDQSZ128 +def : Pat<(v4i32 (X86cvttp2uis (v2f64 (X86VBroadcastld64 addr:$src)))), + (VCVTTPD2UDQSZ128rmb addr:$src)>; +def : Pat<(v4i32 (X86cvttp2uis (v2f64 VR128X:$src))), + (VCVTTPD2UDQSZ128rr VR128X:$src)>; +def : Pat<(v4i32 (X86cvttp2uis (v2f64 (X86VBroadcastld64 addr:$src)))), + (VCVTTPD2UDQSZ128rmb addr:$src)>; +def : Pat<(X86mcvttp2uis (v2f64 VR128X:$src), (v4i32 VR128X:$src0), + VK2WM:$mask), + (VCVTTPD2UDQSZ128rrk VR128X:$src0, VK2WM:$mask, VR128X:$src)>; +def : Pat<(X86mcvttp2uis (v2f64 VR128X:$src), v4i32x_info.ImmAllZerosV, + VK2WM:$mask), + (VCVTTPD2UDQSZ128rrkz VK2WM:$mask, VR128X:$src)>; +def : Pat<(X86mcvttp2uis (loadv2f64 addr:$src), (v4i32 VR128X:$src0), + VK2WM:$mask), + (VCVTTPD2UDQSZ128rmk VR128X:$src0, VK2WM:$mask, addr:$src)>; +def : Pat<(X86mcvttp2uis (loadv2f64 addr:$src), v4i32x_info.ImmAllZerosV, + VK2WM:$mask), + (VCVTTPD2UDQSZ128rmkz VK2WM:$mask, addr:$src)>; +def : Pat<(X86mcvttp2uis (v2f64 (X86VBroadcastld64 addr:$src)), + (v4i32 VR128X:$src0), VK2WM:$mask), + (VCVTTPD2UDQSZ128rmbk VR128X:$src0, VK2WM:$mask, addr:$src)>; +def : Pat<(X86mcvttp2uis (v2f64 (X86VBroadcastld64 addr:$src)), + v4i32x_info.ImmAllZerosV, VK2WM:$mask), + (VCVTTPD2UDQSZ128rmbkz VK2WM:$mask, addr:$src)>; +} + +// Convert scalar float/double to signed/unsigned int 32/64 with truncation and saturation. 
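Before the scalar multiclass below, a note on the masked packed patterns just above: the rrk/rrkz/rmbk forms keyed off X86mcvttp2sis/X86mcvttp2uis follow the usual AVX-512 per-lane masking convention, with the ImmAllZerosV ("kz") variants using an all-zero pass-through. A small illustrative model; maskedConvert and the lane count are invented for exposition and not part of the patch:

// Per-lane masking convention encoded by the VCVTTPD2DQSZ128 patterns:
// merge-masking keeps PassThru lanes where the mask bit is clear.
// Convert stands in for the saturating truncating conversion.
#include <array>
#include <cstdint>

template <typename ConvertFn>
std::array<int32_t, 4> maskedConvert(const std::array<double, 4> &Src,
                                     const std::array<int32_t, 4> &PassThru,
                                     uint8_t Mask, ConvertFn Convert) {
  std::array<int32_t, 4> Dst{};
  for (int I = 0; I < 4; ++I)
    Dst[I] = ((Mask >> I) & 1) ? Convert(Src[I]) : PassThru[I]; // merge-mask
  return Dst;
}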
+multiclass avx10_cvt_s_ds<bits<8> opc, string asm, X86VectorVTInfo _SrcRC, + X86VectorVTInfo _DstRC, SDPatternOperator OpNode, + SDNode OpNodeInt, SDNode OpNodeSAE, + X86FoldableSchedWrite sched> { + let Predicates = [HasAVX10_2], ExeDomain = _SrcRC.ExeDomain in { + let isCodeGenOnly = 1 in { + def rr : AVX512<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.FRC:$src), + !strconcat(asm,"\t{$src, $dst|$dst, $src}"), + [(set _DstRC.RC:$dst, (OpNode _SrcRC.FRC:$src, _DstRC.EltVT))]>, + EVEX, VEX_LIG, Sched<[sched]>, SIMD_EXC; + def rm : AVX512<opc, MRMSrcMem, (outs _DstRC.RC:$dst), (ins _SrcRC.ScalarMemOp:$src), + !strconcat(asm,"\t{$src, $dst|$dst, $src}"), + [(set _DstRC.RC:$dst, (OpNode (_SrcRC.ScalarLdFrag addr:$src), _DstRC.EltVT))]>, + EVEX, VEX_LIG, Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC; + } + def rr_Int : AVX512<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.RC:$src), + !strconcat(asm,"\t{$src, $dst|$dst, $src}"), + [(set _DstRC.RC:$dst, (OpNodeInt (_SrcRC.VT _SrcRC.RC:$src)))]>, + EVEX, VEX_LIG, Sched<[sched]>, SIMD_EXC; + let Uses = [MXCSR] in + def rrb_Int : AVX512<opc, MRMSrcReg, (outs _DstRC.RC:$dst), (ins _SrcRC.RC:$src), + !strconcat(asm,"\t{{sae}, $src, $dst|$dst, $src, {sae}}"), + [(set _DstRC.RC:$dst, (OpNodeSAE (_SrcRC.VT _SrcRC.RC:$src)))]>, + EVEX, VEX_LIG, EVEX_B, Sched<[sched]>; + def rm_Int : AVX512<opc, MRMSrcMem, (outs _DstRC.RC:$dst), + (ins _SrcRC.IntScalarMemOp:$src), + !strconcat(asm,"\t{$src, $dst|$dst, $src}"), + [(set _DstRC.RC:$dst, + (OpNodeInt (_SrcRC.ScalarIntMemFrags addr:$src)))]>, + EVEX, VEX_LIG, Sched<[sched.Folded, sched.ReadAfterFold]>, + SIMD_EXC; + } +} + +defm VCVTTSS2SIS: avx10_cvt_s_ds<0x6D, "vcvttss2sis", f32x_info, i32x_info, + fp_to_sint_sat, X86cvttss2Int, + X86cvttss2IntSAE, WriteCvtSS2I>, + T_MAP5,XS, EVEX_CD8<32, CD8VT1>; +defm VCVTTSS2SI64S: avx10_cvt_s_ds<0x6D, "vcvttss2sis", f32x_info, i64x_info, + fp_to_sint_sat, X86cvttss2Int, + X86cvttss2IntSAE, WriteCvtSS2I>, + REX_W, T_MAP5,XS, EVEX_CD8<32, CD8VT1>; +defm VCVTTSD2SIS: avx10_cvt_s_ds<0x6D, "vcvttsd2sis", f64x_info, i32x_info, + fp_to_sint_sat, X86cvttss2Int, + X86cvttss2IntSAE, WriteCvtSD2I>, + T_MAP5,XD, EVEX_CD8<64, CD8VT1>; +defm VCVTTSD2SI64S: avx10_cvt_s_ds<0x6D, "vcvttsd2sis", f64x_info, i64x_info, + fp_to_sint_sat, X86cvttss2Int, + X86cvttss2IntSAE, WriteCvtSD2I>, + REX_W, T_MAP5,XD, EVEX_CD8<64, CD8VT1>; +defm VCVTTSS2USIS: avx10_cvt_s_ds<0x6C, "vcvttss2usis", f32x_info, i32x_info, + fp_to_uint_sat, X86cvttss2UInt, + X86cvttss2UIntSAE, WriteCvtSS2I>, + T_MAP5,XS, EVEX_CD8<32, CD8VT1>; +defm VCVTTSS2USI64S: avx10_cvt_s_ds<0x6C, "vcvttss2usis", f32x_info, i64x_info, + fp_to_uint_sat, X86cvttss2UInt, + X86cvttss2UIntSAE, WriteCvtSS2I>, + T_MAP5,XS,REX_W, EVEX_CD8<32, CD8VT1>; +defm VCVTTSD2USIS: avx10_cvt_s_ds<0x6C, "vcvttsd2usis", f64x_info, i32x_info, + fp_to_uint_sat, X86cvttss2UInt, + X86cvttss2UIntSAE, WriteCvtSD2I>, + T_MAP5,XD, EVEX_CD8<64, CD8VT1>; +defm VCVTTSD2USI64S: avx10_cvt_s_ds<0x6C, "vcvttsd2usis", f64x_info, i64x_info, + fp_to_uint_sat, X86cvttss2UInt, + X86cvttss2UIntSAE, WriteCvtSD2I>, + T_MAP5,XD, REX_W, EVEX_CD8<64, CD8VT1>; + +//------------------------------------------------- // AVX10 CONVERT instructions //------------------------------------------------- diff --git a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td index 59bfd2b..af39b1a 100644 --- a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td +++ b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td @@ -671,6 +671,11 @@ def X86cvts2usi : 
SDNode<"X86ISD::CVTS2UI", SDTSFloatToInt>; def X86cvts2siRnd : SDNode<"X86ISD::CVTS2SI_RND", SDTSFloatToIntRnd>; def X86cvts2usiRnd : SDNode<"X86ISD::CVTS2UI_RND", SDTSFloatToIntRnd>; +def X86cvttss2Int : SDNode<"X86ISD::CVTTS2SIS", SDTSFloatToInt>; +def X86cvttss2UInt : SDNode<"X86ISD::CVTTS2UIS", SDTSFloatToInt>; +def X86cvttss2IntSAE : SDNode<"X86ISD::CVTTS2SIS_SAE", SDTSFloatToInt>; +def X86cvttss2UIntSAE : SDNode<"X86ISD::CVTTS2UIS_SAE", SDTSFloatToInt>; + // Vector with rounding mode // cvtt fp-to-int staff @@ -680,6 +685,11 @@ def X86cvttp2uiSAE : SDNode<"X86ISD::CVTTP2UI_SAE", SDTFloatToInt>; def X86VSintToFpRnd : SDNode<"X86ISD::SINT_TO_FP_RND", SDTVintToFPRound>; def X86VUintToFpRnd : SDNode<"X86ISD::UINT_TO_FP_RND", SDTVintToFPRound>; +def X86cvttp2sisSAE : SDNode<"X86ISD::CVTTP2SIS_SAE", SDTFloatToInt>; +def X86cvttp2uisSAE : SDNode<"X86ISD::CVTTP2UIS_SAE", SDTFloatToInt>; +def X86cvttp2sis : SDNode<"X86ISD::CVTTP2SIS", SDTFloatToInt>; +def X86cvttp2uis : SDNode<"X86ISD::CVTTP2UIS", SDTFloatToInt>; + // cvt fp-to-int staff def X86cvtp2IntRnd : SDNode<"X86ISD::CVTP2SI_RND", SDTFloatToIntRnd>; def X86cvtp2UIntRnd : SDNode<"X86ISD::CVTP2UI_RND", SDTFloatToIntRnd>; @@ -735,6 +745,8 @@ def X86mcvtp2Int : SDNode<"X86ISD::MCVTP2SI", SDTMFloatToInt>; def X86mcvtp2UInt : SDNode<"X86ISD::MCVTP2UI", SDTMFloatToInt>; def X86mcvttp2si : SDNode<"X86ISD::MCVTTP2SI", SDTMFloatToInt>; def X86mcvttp2ui : SDNode<"X86ISD::MCVTTP2UI", SDTMFloatToInt>; +def X86mcvttp2sis : SDNode<"X86ISD::MCVTTP2SIS", SDTMFloatToInt>; +def X86mcvttp2uis : SDNode<"X86ISD::MCVTTP2UIS", SDTMFloatToInt>; def SDTcvtph2ps : SDTypeProfile<1, 1, [SDTCVecEltisVT<0, f32>, SDTCVecEltisVT<1, i16>]>; diff --git a/llvm/lib/Target/X86/X86IntrinsicsInfo.h b/llvm/lib/Target/X86/X86IntrinsicsInfo.h index 4f39e66..86fd040 100644 --- a/llvm/lib/Target/X86/X86IntrinsicsInfo.h +++ b/llvm/lib/Target/X86/X86IntrinsicsInfo.h @@ -569,12 +569,36 @@ static const IntrinsicData IntrinsicsWithoutChain[] = { X86ISD::CVTP2UI, X86ISD::CVTP2UI_RND), X86_INTRINSIC_DATA(avx10_mask_vcvttpd2dq256, INTR_TYPE_1OP_MASK, X86ISD::CVTTP2SI, X86ISD::CVTTP2SI_SAE), + X86_INTRINSIC_DATA(avx10_mask_vcvttpd2dqs_128, CVTPD2DQ_MASK, + X86ISD::CVTTP2SIS, X86ISD::MCVTTP2SIS), + X86_INTRINSIC_DATA(avx10_mask_vcvttpd2dqs_round_256, INTR_TYPE_1OP_MASK, + X86ISD::CVTTP2SIS, X86ISD::CVTTP2SIS_SAE), + X86_INTRINSIC_DATA(avx10_mask_vcvttpd2dqs_round_512, INTR_TYPE_1OP_MASK, + X86ISD::CVTTP2SIS, X86ISD::CVTTP2SIS_SAE), X86_INTRINSIC_DATA(avx10_mask_vcvttpd2qq256, INTR_TYPE_1OP_MASK, X86ISD::CVTTP2SI, X86ISD::CVTTP2SI_SAE), + X86_INTRINSIC_DATA(avx10_mask_vcvttpd2qqs_128, INTR_TYPE_1OP_MASK, + X86ISD::CVTTP2SIS, 0), + X86_INTRINSIC_DATA(avx10_mask_vcvttpd2qqs_round_256, INTR_TYPE_1OP_MASK, + X86ISD::CVTTP2SIS, X86ISD::CVTTP2SIS_SAE), + X86_INTRINSIC_DATA(avx10_mask_vcvttpd2qqs_round_512, INTR_TYPE_1OP_MASK, + X86ISD::CVTTP2SIS, X86ISD::CVTTP2SIS_SAE), X86_INTRINSIC_DATA(avx10_mask_vcvttpd2udq256, INTR_TYPE_1OP_MASK, X86ISD::CVTTP2UI, X86ISD::CVTTP2UI_SAE), + X86_INTRINSIC_DATA(avx10_mask_vcvttpd2udqs_128, CVTPD2DQ_MASK, + X86ISD::CVTTP2UIS, X86ISD::MCVTTP2SIS), + X86_INTRINSIC_DATA(avx10_mask_vcvttpd2udqs_round_256, INTR_TYPE_1OP_MASK, + X86ISD::CVTTP2UIS, X86ISD::CVTTP2UIS_SAE), + X86_INTRINSIC_DATA(avx10_mask_vcvttpd2udqs_round_512, INTR_TYPE_1OP_MASK, + X86ISD::CVTTP2UIS, X86ISD::CVTTP2UIS_SAE), X86_INTRINSIC_DATA(avx10_mask_vcvttpd2uqq256, INTR_TYPE_1OP_MASK, X86ISD::CVTTP2UI, X86ISD::CVTTP2UI_SAE), + X86_INTRINSIC_DATA(avx10_mask_vcvttpd2uqqs_128, INTR_TYPE_1OP_MASK, + 
X86ISD::CVTTP2UIS, 0), + X86_INTRINSIC_DATA(avx10_mask_vcvttpd2uqqs_round_256, INTR_TYPE_1OP_MASK, + X86ISD::CVTTP2UIS, X86ISD::CVTTP2UIS_SAE), + X86_INTRINSIC_DATA(avx10_mask_vcvttpd2uqqs_round_512, INTR_TYPE_1OP_MASK, + X86ISD::CVTTP2UIS, X86ISD::CVTTP2UIS_SAE), X86_INTRINSIC_DATA(avx10_mask_vcvttph2dq256, INTR_TYPE_1OP_MASK, X86ISD::CVTTP2SI, X86ISD::CVTTP2SI_SAE), X86_INTRINSIC_DATA(avx10_mask_vcvttph2ibs128, INTR_TYPE_1OP_MASK, @@ -601,6 +625,12 @@ static const IntrinsicData IntrinsicsWithoutChain[] = { X86ISD::CVTTP2SI, X86ISD::CVTTP2SI_SAE), X86_INTRINSIC_DATA(avx10_mask_vcvttps2dq256, INTR_TYPE_1OP_MASK, X86ISD::CVTTP2SI, X86ISD::CVTTP2SI_SAE), + X86_INTRINSIC_DATA(avx10_mask_vcvttps2dqs_128, INTR_TYPE_1OP_MASK, + X86ISD::CVTTP2SIS, 0), + X86_INTRINSIC_DATA(avx10_mask_vcvttps2dqs_round_256, INTR_TYPE_1OP_MASK, + X86ISD::CVTTP2SIS, X86ISD::CVTTP2SIS_SAE), + X86_INTRINSIC_DATA(avx10_mask_vcvttps2dqs_round_512, INTR_TYPE_1OP_MASK, + X86ISD::CVTTP2SIS, X86ISD::CVTTP2SIS_SAE), X86_INTRINSIC_DATA(avx10_mask_vcvttps2ibs128, INTR_TYPE_1OP_MASK, X86ISD::CVTTP2IBS, 0), X86_INTRINSIC_DATA(avx10_mask_vcvttps2ibs256, INTR_TYPE_1OP_MASK, @@ -615,10 +645,28 @@ static const IntrinsicData IntrinsicsWithoutChain[] = { X86ISD::CVTTP2IUBS, X86ISD::CVTTP2IUBS_SAE), X86_INTRINSIC_DATA(avx10_mask_vcvttps2qq256, INTR_TYPE_1OP_MASK, X86ISD::CVTTP2SI, X86ISD::CVTTP2SI_SAE), + X86_INTRINSIC_DATA(avx10_mask_vcvttps2qqs_128, INTR_TYPE_1OP_MASK, + X86ISD::CVTTP2SIS, 0), + X86_INTRINSIC_DATA(avx10_mask_vcvttps2qqs_round_256, INTR_TYPE_1OP_MASK, + X86ISD::CVTTP2SIS, X86ISD::CVTTP2SIS_SAE), + X86_INTRINSIC_DATA(avx10_mask_vcvttps2qqs_round_512, INTR_TYPE_1OP_MASK, + X86ISD::CVTTP2SIS, X86ISD::CVTTP2SIS_SAE), X86_INTRINSIC_DATA(avx10_mask_vcvttps2udq256, INTR_TYPE_1OP_MASK, X86ISD::CVTTP2UI, X86ISD::CVTTP2UI_SAE), + X86_INTRINSIC_DATA(avx10_mask_vcvttps2udqs_128, INTR_TYPE_1OP_MASK, + X86ISD::CVTTP2UIS, 0), + X86_INTRINSIC_DATA(avx10_mask_vcvttps2udqs_round_256, INTR_TYPE_1OP_MASK, + X86ISD::CVTTP2UIS, X86ISD::CVTTP2UIS_SAE), + X86_INTRINSIC_DATA(avx10_mask_vcvttps2udqs_round_512, INTR_TYPE_1OP_MASK, + X86ISD::CVTTP2UIS, X86ISD::CVTTP2UIS_SAE), X86_INTRINSIC_DATA(avx10_mask_vcvttps2uqq256, INTR_TYPE_1OP_MASK, X86ISD::CVTTP2UI, X86ISD::CVTTP2UI_SAE), + X86_INTRINSIC_DATA(avx10_mask_vcvttps2uqqs_128, INTR_TYPE_1OP_MASK, + X86ISD::CVTTP2UIS, 0), + X86_INTRINSIC_DATA(avx10_mask_vcvttps2uqqs_round_256, INTR_TYPE_1OP_MASK, + X86ISD::CVTTP2UIS, X86ISD::CVTTP2UIS_SAE), + X86_INTRINSIC_DATA(avx10_mask_vcvttps2uqqs_round_512, INTR_TYPE_1OP_MASK, + X86ISD::CVTTP2UIS, X86ISD::CVTTP2UIS_SAE), X86_INTRINSIC_DATA(avx10_mask_vfcmaddcph256, CFMA_OP_MASK, X86ISD::VFCMADDC, X86ISD::VFCMADDC_RND), X86_INTRINSIC_DATA(avx10_mask_vfcmulcph256, INTR_TYPE_2OP_MASK, @@ -757,6 +805,22 @@ static const IntrinsicData IntrinsicsWithoutChain[] = { X86ISD::CVTTP2IUBS, 0), X86_INTRINSIC_DATA(avx10_vcvttnebf162iubs512, INTR_TYPE_1OP, X86ISD::CVTTP2IUBS, 0), + X86_INTRINSIC_DATA(avx10_vcvttsd2sis, INTR_TYPE_1OP_SAE, X86ISD::CVTTS2SIS, + X86ISD::CVTTS2SIS_SAE), + X86_INTRINSIC_DATA(avx10_vcvttsd2sis64, INTR_TYPE_1OP_SAE, + X86ISD::CVTTS2SIS, X86ISD::CVTTS2SIS_SAE), + X86_INTRINSIC_DATA(avx10_vcvttsd2usis, INTR_TYPE_1OP_SAE, X86ISD::CVTTS2UIS, + X86ISD::CVTTS2UIS_SAE), + X86_INTRINSIC_DATA(avx10_vcvttsd2usis64, INTR_TYPE_1OP_SAE, + X86ISD::CVTTS2UIS, X86ISD::CVTTS2UIS_SAE), + X86_INTRINSIC_DATA(avx10_vcvttss2sis, INTR_TYPE_1OP_SAE, X86ISD::CVTTS2SIS, + X86ISD::CVTTS2SIS_SAE), + X86_INTRINSIC_DATA(avx10_vcvttss2sis64, INTR_TYPE_1OP_SAE, + 
X86ISD::CVTTS2SIS, X86ISD::CVTTS2SIS_SAE), + X86_INTRINSIC_DATA(avx10_vcvttss2usis, INTR_TYPE_1OP_SAE, X86ISD::CVTTS2UIS, + X86ISD::CVTTS2UIS_SAE), + X86_INTRINSIC_DATA(avx10_vcvttss2usis64, INTR_TYPE_1OP_SAE, + X86ISD::CVTTS2UIS, X86ISD::CVTTS2UIS_SAE), X86_INTRINSIC_DATA(avx10_vdivpd256, INTR_TYPE_2OP, ISD::FDIV, X86ISD::FDIV_RND), X86_INTRINSIC_DATA(avx10_vdivph256, INTR_TYPE_2OP, ISD::FDIV, @@ -834,6 +898,7 @@ static const IntrinsicData IntrinsicsWithoutChain[] = { X86ISD::FSUB_RND), X86_INTRINSIC_DATA(avx10_vsubps256, INTR_TYPE_2OP, ISD::FSUB, X86ISD::FSUB_RND), + X86_INTRINSIC_DATA(avx2_mpsadbw, INTR_TYPE_3OP_IMM8, X86ISD::MPSADBW, 0), X86_INTRINSIC_DATA(avx2_packssdw, INTR_TYPE_2OP, X86ISD::PACKSS, 0), X86_INTRINSIC_DATA(avx2_packsswb, INTR_TYPE_2OP, X86ISD::PACKSS, 0), diff --git a/llvm/lib/Target/X86/X86ScheduleZnver4.td b/llvm/lib/Target/X86/X86ScheduleZnver4.td index 420c429..6181ee8 100644 --- a/llvm/lib/Target/X86/X86ScheduleZnver4.td +++ b/llvm/lib/Target/X86/X86ScheduleZnver4.td @@ -1855,6 +1855,14 @@ def Zn4WriteVZeroIdiomLogicX : SchedWriteVariant<[ // NOTE: PXORrr,PANDNrr are not zero-cycle! def : InstRW<[Zn4WriteVZeroIdiomLogicX], (instrs VPXORrr, VPANDNrr)>; +// TODO: This should be extended to incorporate all of the AVX512 zeroing +// idioms that can be executed by the renamer. +def Zn4WriteVZeroIdiomLogicZ : SchedWriteVariant<[ + SchedVar<MCSchedPredicate<ZeroIdiomPredicate>, [Zn4WriteZeroLatency]>, + SchedVar<NoSchedPred, [WriteVecLogicZ]> +]>; +def : InstRW<[Zn4WriteVZeroIdiomLogicZ], (instrs VPXORDZrr)>; + def Zn4WriteVZeroIdiomLogicY : SchedWriteVariant<[ SchedVar<MCSchedPredicate<ZeroIdiomPredicate>, [Zn4WriteZeroLatency]>, SchedVar<NoSchedPred, [WriteVecLogicY]> @@ -1930,6 +1938,12 @@ def : IsZeroIdiomFunction<[ VPSUBUSBYrr, VPSUBUSWYrr, VPCMPGTBYrr, VPCMPGTWYrr, VPCMPGTDYrr, VPCMPGTQYrr ], ZeroIdiomPredicate>, + + // AVX ZMM Zero-idioms. + // TODO: This should be expanded to incorporate all AVX512 zeroing idioms. + DepBreakingClass<[ + VPXORDZrr + ], ZeroIdiomPredicate>, ]>; def : IsDepBreakingFunction<[ diff --git a/llvm/lib/Transforms/IPO/FunctionImport.cpp b/llvm/lib/Transforms/IPO/FunctionImport.cpp index ff0d7817..261731f 100644 --- a/llvm/lib/Transforms/IPO/FunctionImport.cpp +++ b/llvm/lib/Transforms/IPO/FunctionImport.cpp @@ -722,7 +722,7 @@ class WorkloadImportsManager : public ModuleImportsManager { return; } const auto &CtxMap = *Ctx; - DenseSet<GlobalValue::GUID> ContainedGUIDs; + SetVector<GlobalValue::GUID> ContainedGUIDs; for (const auto &[RootGuid, Root] : CtxMap) { // Avoid ContainedGUIDs to get in/out of scope. Reuse its memory for // subsequent roots, but clear its contents. diff --git a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp index c235d2fb..f3f5ffb 100644 --- a/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp +++ b/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp @@ -770,8 +770,7 @@ static bool unswitchTrivialSwitch(Loop &L, SwitchInst &SI, DominatorTree &DT, // instruction in the block. 
auto *TI = BBToCheck.getTerminator(); bool isUnreachable = isa<UnreachableInst>(TI); - return !isUnreachable || - (isUnreachable && (BBToCheck.getFirstNonPHIOrDbg() != TI)); + return !isUnreachable || BBToCheck.getFirstNonPHIOrDbg() != TI; }; SmallVector<int, 4> ExitCaseIndices; diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp index 3e711809..e993e56 100644 --- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp +++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp @@ -2652,8 +2652,12 @@ bool VectorCombine::shrinkType(llvm::Instruction &I) { return false; Value *Op0 = ZExted; - if (auto *OI = dyn_cast<Instruction>(OtherOperand)) - Builder.SetInsertPoint(OI->getNextNode()); + if (auto *OI = dyn_cast<Instruction>(OtherOperand)) { + if (isa<PHINode>(OI)) + Builder.SetInsertPoint(OI->getParent()->getFirstInsertionPt()); + else + Builder.SetInsertPoint(OI->getNextNode()); + } Value *Op1 = Builder.CreateTrunc(OtherOperand, SmallTy); Builder.SetInsertPoint(&I); // Keep the order of operands the same diff --git a/llvm/test/Analysis/CostModel/RISCV/arith-fp.ll b/llvm/test/Analysis/CostModel/RISCV/arith-fp.ll index 5236f5a..e92b2b3 100644 --- a/llvm/test/Analysis/CostModel/RISCV/arith-fp.ll +++ b/llvm/test/Analysis/CostModel/RISCV/arith-fp.ll @@ -1,25 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py -; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-min=128 < %s | FileCheck %s +; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH +; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN ; Check that we don't crash querying costs when vectors are not enabled. 
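The SimpleLoopUnswitch change above is a pure Boolean simplification: for the bools involved, !a || (a && b) reduces to !a || b by absorption. A compile-time check of that identity, independent of the patch:

// Exhaustive verification that the rewritten predicate in
// unswitchTrivialSwitch is equivalent to the original form.
constexpr bool original(bool A, bool B) { return !A || (A && B); }
constexpr bool simplified(bool A, bool B) { return !A || B; }
// All four input combinations agree, so the rewrite preserves behavior.
static_assert(original(false, false) == simplified(false, false) &&
              original(false, true) == simplified(false, true) &&
              original(true, false) == simplified(true, false) &&
              original(true, true) == simplified(true, true),
              "absorption identity");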
; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -define i32 @fadd() { +define void @fadd() { ; CHECK-LABEL: 'fadd' -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fadd half undef, undef ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = fadd float undef, undef ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = fadd double undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fadd <1 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = fadd <2 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = fadd <4 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = fadd <8 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16 = fadd <16 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32F16 = fadd <32 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 = fadd <vscale x 1 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16 = fadd <vscale x 2 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = fadd <vscale x 4 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16 = fadd <vscale x 8 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16 = fadd <vscale x 16 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fadd <vscale x 32 x half> undef, undef ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = fadd <1 x float> undef, undef ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = fadd <2 x float> undef, undef ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fadd <4 x float> undef, undef @@ -38,26 +26,11 @@ define i32 @fadd() { ; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64 = fadd <vscale x 2 x double> undef, undef ; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64 = fadd <vscale x 4 x double> undef, undef ; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64 = fadd <vscale x 8 x double> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; - %F16 = fadd half undef, undef %F32 = fadd float undef, undef %F64 = fadd double undef, undef - %V1F16 = fadd <1 x half> undef, undef - %V2F16 = fadd <2 x half> undef, undef - %V4F16 = fadd <4 x half> undef, undef - %V8F16 = fadd <8 x half> undef, undef - %V16F16 = fadd <16 x half> undef, undef - %V32F16 = fadd <32 x half> undef, undef - - %NXV1F16 = fadd <vscale x 1 x half> undef, undef - %NXV2F16 = fadd <vscale x 2 x half> undef, undef - %NXV4F16 = fadd <vscale x 4 x half> undef, undef - %NXV8F16 = fadd <vscale x 8 x half> undef, undef - %NXV16F16 = fadd <vscale x 16 x half> undef, undef - %NXV32F16 = fadd <vscale x 32 x half> undef, undef - %V1F32 = fadd <1 x float> undef, undef %V2F32 = fadd <2 x float> undef, undef %V4F32 = fadd <4 x float> undef, undef @@ 
-80,26 +53,49 @@ define i32 @fadd() { %NXV4F64 = fadd <vscale x 4 x double> undef, undef %NXV8F64 = fadd <vscale x 8 x double> undef, undef - ret i32 undef + ret void } -define i32 @fsub() { +define void @fadd_f16() { +; CHECK-LABEL: 'fadd_f16' +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fadd half undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fadd <1 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = fadd <2 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = fadd <4 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = fadd <8 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16 = fadd <16 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32F16 = fadd <32 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 = fadd <vscale x 1 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16 = fadd <vscale x 2 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = fadd <vscale x 4 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16 = fadd <vscale x 8 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16 = fadd <vscale x 16 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fadd <vscale x 32 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + %F16 = fadd half undef, undef + + %V1F16 = fadd <1 x half> undef, undef + %V2F16 = fadd <2 x half> undef, undef + %V4F16 = fadd <4 x half> undef, undef + %V8F16 = fadd <8 x half> undef, undef + %V16F16 = fadd <16 x half> undef, undef + %V32F16 = fadd <32 x half> undef, undef + + %NXV1F16 = fadd <vscale x 1 x half> undef, undef + %NXV2F16 = fadd <vscale x 2 x half> undef, undef + %NXV4F16 = fadd <vscale x 4 x half> undef, undef + %NXV8F16 = fadd <vscale x 8 x half> undef, undef + %NXV16F16 = fadd <vscale x 16 x half> undef, undef + %NXV32F16 = fadd <vscale x 32 x half> undef, undef + + ret void +} + +define void @fsub() { ; CHECK-LABEL: 'fsub' -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fsub half undef, undef ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = fsub float undef, undef ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = fsub double undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fsub <1 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = fsub <2 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = fsub <4 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = fsub <8 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16 = fsub <16 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32F16 = fsub <32 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 
= fsub <vscale x 1 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16 = fsub <vscale x 2 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = fsub <vscale x 4 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16 = fsub <vscale x 8 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16 = fsub <vscale x 16 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fsub <vscale x 32 x half> undef, undef ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = fsub <1 x float> undef, undef ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = fsub <2 x float> undef, undef ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fsub <4 x float> undef, undef @@ -118,26 +114,11 @@ define i32 @fsub() { ; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64 = fsub <vscale x 2 x double> undef, undef ; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64 = fsub <vscale x 4 x double> undef, undef ; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64 = fsub <vscale x 8 x double> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; - %F16 = fsub half undef, undef %F32 = fsub float undef, undef %F64 = fsub double undef, undef - %V1F16 = fsub <1 x half> undef, undef - %V2F16 = fsub <2 x half> undef, undef - %V4F16 = fsub <4 x half> undef, undef - %V8F16 = fsub <8 x half> undef, undef - %V16F16 = fsub <16 x half> undef, undef - %V32F16 = fsub <32 x half> undef, undef - - %NXV1F16 = fsub <vscale x 1 x half> undef, undef - %NXV2F16 = fsub <vscale x 2 x half> undef, undef - %NXV4F16 = fsub <vscale x 4 x half> undef, undef - %NXV8F16 = fsub <vscale x 8 x half> undef, undef - %NXV16F16 = fsub <vscale x 16 x half> undef, undef - %NXV32F16 = fsub <vscale x 32 x half> undef, undef - %V1F32 = fsub <1 x float> undef, undef %V2F32 = fsub <2 x float> undef, undef %V4F32 = fsub <4 x float> undef, undef @@ -160,26 +141,49 @@ define i32 @fsub() { %NXV4F64 = fsub <vscale x 4 x double> undef, undef %NXV8F64 = fsub <vscale x 8 x double> undef, undef - ret i32 undef + ret void } -define i32 @fmul() { +define void @fsub_f16() { +; CHECK-LABEL: 'fsub_f16' +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fsub half undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fsub <1 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = fsub <2 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = fsub <4 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = fsub <8 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16 = fsub <16 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32F16 = fsub <32 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 = fsub <vscale x 1 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for 
instruction: %NXV2F16 = fsub <vscale x 2 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = fsub <vscale x 4 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16 = fsub <vscale x 8 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16 = fsub <vscale x 16 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fsub <vscale x 32 x half> undef, undef +; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + %F16 = fsub half undef, undef + + %V1F16 = fsub <1 x half> undef, undef + %V2F16 = fsub <2 x half> undef, undef + %V4F16 = fsub <4 x half> undef, undef + %V8F16 = fsub <8 x half> undef, undef + %V16F16 = fsub <16 x half> undef, undef + %V32F16 = fsub <32 x half> undef, undef + + %NXV1F16 = fsub <vscale x 1 x half> undef, undef + %NXV2F16 = fsub <vscale x 2 x half> undef, undef + %NXV4F16 = fsub <vscale x 4 x half> undef, undef + %NXV8F16 = fsub <vscale x 8 x half> undef, undef + %NXV16F16 = fsub <vscale x 16 x half> undef, undef + %NXV32F16 = fsub <vscale x 32 x half> undef, undef + + ret void +} + +define void @fmul() { ; CHECK-LABEL: 'fmul' -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fmul half undef, undef ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = fmul float undef, undef ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = fmul double undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fmul <1 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = fmul <2 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = fmul <4 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = fmul <8 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16 = fmul <16 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32F16 = fmul <32 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 = fmul <vscale x 1 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16 = fmul <vscale x 2 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = fmul <vscale x 4 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16 = fmul <vscale x 8 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16 = fmul <vscale x 16 x half> undef, undef -; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fmul <vscale x 32 x half> undef, undef ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = fmul <1 x float> undef, undef ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = fmul <2 x float> undef, undef ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fmul <4 x float> undef, undef @@ -198,26 +202,11 @@ define i32 @fmul() { ; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64 = fmul <vscale x 2 x double> undef, undef ; CHECK-NEXT: Cost Model: Found an estimated 
cost of 8 for instruction: %NXV4F64 = fmul <vscale x 4 x double> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64 = fmul <vscale x 8 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %F16 = fmul half undef, undef
 %F32 = fmul float undef, undef
 %F64 = fmul double undef, undef
- %V1F16 = fmul <1 x half> undef, undef
- %V2F16 = fmul <2 x half> undef, undef
- %V4F16 = fmul <4 x half> undef, undef
- %V8F16 = fmul <8 x half> undef, undef
- %V16F16 = fmul <16 x half> undef, undef
- %V32F16 = fmul <32 x half> undef, undef
-
- %NXV1F16 = fmul <vscale x 1 x half> undef, undef
- %NXV2F16 = fmul <vscale x 2 x half> undef, undef
- %NXV4F16 = fmul <vscale x 4 x half> undef, undef
- %NXV8F16 = fmul <vscale x 8 x half> undef, undef
- %NXV16F16 = fmul <vscale x 16 x half> undef, undef
- %NXV32F16 = fmul <vscale x 32 x half> undef, undef
-
 %V1F32 = fmul <1 x float> undef, undef
 %V2F32 = fmul <2 x float> undef, undef
 %V4F32 = fmul <4 x float> undef, undef
@@ -240,26 +229,49 @@ define i32 @fmul() {
 %NXV4F64 = fmul <vscale x 4 x double> undef, undef
 %NXV8F64 = fmul <vscale x 8 x double> undef, undef
- ret i32 undef
+ ret void
+}
+
+define void @fmul_f16() {
+; CHECK-LABEL: 'fmul_f16'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fmul half undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fmul <1 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = fmul <2 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = fmul <4 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = fmul <8 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16 = fmul <16 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32F16 = fmul <32 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 = fmul <vscale x 1 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16 = fmul <vscale x 2 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = fmul <vscale x 4 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16 = fmul <vscale x 8 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16 = fmul <vscale x 16 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fmul <vscale x 32 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %F16 = fmul half undef, undef
+
+ %V1F16 = fmul <1 x half> undef, undef
+ %V2F16 = fmul <2 x half> undef, undef
+ %V4F16 = fmul <4 x half> undef, undef
+ %V8F16 = fmul <8 x half> undef, undef
+ %V16F16 = fmul <16 x half> undef, undef
+ %V32F16 = fmul <32 x half> undef, undef
+
+ %NXV1F16 = fmul <vscale x 1 x half> undef, undef
+ %NXV2F16 = fmul <vscale x 2 x half> undef, undef
+ %NXV4F16 = fmul <vscale x 4 x half> undef, undef
+ %NXV8F16 = fmul <vscale x 8 x half> undef, undef
+ %NXV16F16 = fmul <vscale x 16 x half> undef, undef
+ %NXV32F16 = fmul <vscale x 32 x half> undef, undef
+
+ ret void
}

-define i32 @fdiv() {
+define void @fdiv() {
; CHECK-LABEL: 'fdiv'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fdiv half undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = fdiv float undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = fdiv double undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fdiv <1 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = fdiv <2 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = fdiv <4 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = fdiv <8 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16 = fdiv <16 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32F16 = fdiv <32 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 = fdiv <vscale x 1 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16 = fdiv <vscale x 2 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = fdiv <vscale x 4 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16 = fdiv <vscale x 8 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16 = fdiv <vscale x 16 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fdiv <vscale x 32 x half> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = fdiv <1 x float> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = fdiv <2 x float> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fdiv <4 x float> undef, undef
@@ -278,26 +290,11 @@ define i32 @fdiv() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64 = fdiv <vscale x 2 x double> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64 = fdiv <vscale x 4 x double> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64 = fdiv <vscale x 8 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %F16 = fdiv half undef, undef
 %F32 = fdiv float undef, undef
 %F64 = fdiv double undef, undef
- %V1F16 = fdiv <1 x half> undef, undef
- %V2F16 = fdiv <2 x half> undef, undef
- %V4F16 = fdiv <4 x half> undef, undef
- %V8F16 = fdiv <8 x half> undef, undef
- %V16F16 = fdiv <16 x half> undef, undef
- %V32F16 = fdiv <32 x half> undef, undef
-
- %NXV1F16 = fdiv <vscale x 1 x half> undef, undef
- %NXV2F16 = fdiv <vscale x 2 x half> undef, undef
- %NXV4F16 = fdiv <vscale x 4 x half> undef, undef
- %NXV8F16 = fdiv <vscale x 8 x half> undef, undef
- %NXV16F16 = fdiv <vscale x 16 x half> undef, undef
- %NXV32F16 = fdiv <vscale x 32 x half> undef, undef
-
 %V1F32 = fdiv <1 x float> undef, undef
 %V2F32 = fdiv <2 x float> undef, undef
 %V4F32 = fdiv <4 x float> undef, undef
@@ -320,26 +317,49 @@ define i32 @fdiv() {
 %NXV4F64 = fdiv <vscale x 4 x double> undef, undef
 %NXV8F64 = fdiv <vscale x 8 x double> undef, undef
- ret i32 undef
+ ret void
+}
+
+define void @fdiv_f16() {
+; CHECK-LABEL: 'fdiv_f16'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fdiv half undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fdiv <1 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = fdiv <2 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = fdiv <4 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = fdiv <8 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16 = fdiv <16 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32F16 = fdiv <32 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 = fdiv <vscale x 1 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16 = fdiv <vscale x 2 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = fdiv <vscale x 4 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16 = fdiv <vscale x 8 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16 = fdiv <vscale x 16 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fdiv <vscale x 32 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %F16 = fdiv half undef, undef
+
+ %V1F16 = fdiv <1 x half> undef, undef
+ %V2F16 = fdiv <2 x half> undef, undef
+ %V4F16 = fdiv <4 x half> undef, undef
+ %V8F16 = fdiv <8 x half> undef, undef
+ %V16F16 = fdiv <16 x half> undef, undef
+ %V32F16 = fdiv <32 x half> undef, undef
+
+ %NXV1F16 = fdiv <vscale x 1 x half> undef, undef
+ %NXV2F16 = fdiv <vscale x 2 x half> undef, undef
+ %NXV4F16 = fdiv <vscale x 4 x half> undef, undef
+ %NXV8F16 = fdiv <vscale x 8 x half> undef, undef
+ %NXV16F16 = fdiv <vscale x 16 x half> undef, undef
+ %NXV32F16 = fdiv <vscale x 32 x half> undef, undef
+
+ ret void
}

-define i32 @frem() {
+define void @frem() {
; CHECK-LABEL: 'frem'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = frem half undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = frem float undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = frem double undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F16 = frem <1 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2F16 = frem <2 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %V4F16 = frem <4 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %V8F16 = frem <8 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 63 for instruction: %V16F16 = frem <16 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 127 for instruction: %V32F16 = frem <32 x half> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16 = frem <vscale x 1 x half> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16 = frem <vscale x 2 x half> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16 = frem <vscale x 4 x half> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16 = frem <vscale x 8 x half> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16 = frem <vscale x 16 x half> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV32F16 = frem <vscale x 32 x half> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F32 = frem <1 x float> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2F32 = frem <2 x float> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %V4F32 = frem <4 x float> undef, undef
@@ -358,26 +378,11 @@ define i32 @frem() {
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV2F64 = frem <vscale x 2 x double> undef, undef
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV4F64 = frem <vscale x 4 x double> undef, undef
; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV8F64 = frem <vscale x 8 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %F16 = frem half undef, undef
 %F32 = frem float undef, undef
 %F64 = frem double undef, undef
- %V1F16 = frem <1 x half> undef, undef
- %V2F16 = frem <2 x half> undef, undef
- %V4F16 = frem <4 x half> undef, undef
- %V8F16 = frem <8 x half> undef, undef
- %V16F16 = frem <16 x half> undef, undef
- %V32F16 = frem <32 x half> undef, undef
-
- %NXV1F16 = frem <vscale x 1 x half> undef, undef
- %NXV2F16 = frem <vscale x 2 x half> undef, undef
- %NXV4F16 = frem <vscale x 4 x half> undef, undef
- %NXV8F16 = frem <vscale x 8 x half> undef, undef
- %NXV16F16 = frem <vscale x 16 x half> undef, undef
- %NXV32F16 = frem <vscale x 32 x half> undef, undef
-
 %V1F32 = frem <1 x float> undef, undef
 %V2F32 = frem <2 x float> undef, undef
 %V4F32 = frem <4 x float> undef, undef
@@ -400,26 +405,65 @@ define i32 @frem() {
 %NXV4F64 = frem <vscale x 4 x double> undef, undef
 %NXV8F64 = frem <vscale x 8 x double> undef, undef
- ret i32 undef
+ ret void
}

-define i32 @fneg() {
+define void @frem_f16() {
+; ZVFH-LABEL: 'frem_f16'
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = frem half undef, undef
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F16 = frem <1 x half> undef, undef
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2F16 = frem <2 x half> undef, undef
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %V4F16 = frem <4 x half> undef, undef
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %V8F16 = frem <8 x half> undef, undef
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 63 for instruction: %V16F16 = frem <16 x half> undef, undef
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 127 for instruction: %V32F16 = frem <32 x half> undef, undef
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16 = frem <vscale x 1 x half> undef, undef
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16 = frem <vscale x 2 x half> undef, undef
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16 = frem <vscale x 4 x half> undef, undef
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16 = frem <vscale x 8 x half> undef, undef
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16 = frem <vscale x 16 x half> undef, undef
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV32F16 = frem <vscale x 32 x half> undef, undef
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; ZVFHMIN-LABEL: 'frem_f16'
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = frem half undef, undef
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F16 = frem <1 x half> undef, undef
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V2F16 = frem <2 x half> undef, undef
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %V4F16 = frem <4 x half> undef, undef
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %V8F16 = frem <8 x half> undef, undef
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 63 for instruction: %V16F16 = frem <16 x half> undef, undef
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 127 for instruction: %V32F16 = frem <32 x half> undef, undef
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 = frem <vscale x 1 x half> undef, undef
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16 = frem <vscale x 2 x half> undef, undef
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = frem <vscale x 4 x half> undef, undef
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV8F16 = frem <vscale x 8 x half> undef, undef
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV16F16 = frem <vscale x 16 x half> undef, undef
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV32F16 = frem <vscale x 32 x half> undef, undef
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %F16 = frem half undef, undef
+
+ %V1F16 = frem <1 x half> undef, undef
+ %V2F16 = frem <2 x half> undef, undef
+ %V4F16 = frem <4 x half> undef, undef
+ %V8F16 = frem <8 x half> undef, undef
+ %V16F16 = frem <16 x half> undef, undef
+ %V32F16 = frem <32 x half> undef, undef
+
+ %NXV1F16 = frem <vscale x 1 x half> undef, undef
+ %NXV2F16 = frem <vscale x 2 x half> undef, undef
+ %NXV4F16 = frem <vscale x 4 x half> undef, undef
+ %NXV8F16 = frem <vscale x 8 x half> undef, undef
+ %NXV16F16 = frem <vscale x 16 x half> undef, undef
+ %NXV32F16 = frem <vscale x 32 x half> undef, undef
+
+ ret void
+}
+
+define void @fneg() {
; CHECK-LABEL: 'fneg'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fneg half undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = fneg float undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = fneg double undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fneg <1 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = fneg <2 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = fneg <4 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = fneg <8 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16 = fneg <16 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32F16 = fneg <32 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 = fneg <vscale x 1 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16 = fneg <vscale x 2 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = fneg <vscale x 4 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16 = fneg <vscale x 8 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16 = fneg <vscale x 16 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fneg <vscale x 32 x half> undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = fneg <1 x float> undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = fneg <2 x float> undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fneg <4 x float> undef
@@ -438,26 +482,11 @@ define i32 @fneg() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64 = fneg <vscale x 2 x double> undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64 = fneg <vscale x 4 x double> undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64 = fneg <vscale x 8 x double> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %F16 = fneg half undef
 %F32 = fneg float undef
 %F64 = fneg double undef
- %V1F16 = fneg <1 x half> undef
- %V2F16 = fneg <2 x half> undef
- %V4F16 = fneg <4 x half> undef
- %V8F16 = fneg <8 x half> undef
- %V16F16 = fneg <16 x half> undef
- %V32F16 = fneg <32 x half> undef
-
- %NXV1F16 = fneg <vscale x 1 x half> undef
- %NXV2F16 = fneg <vscale x 2 x half> undef
- %NXV4F16 = fneg <vscale x 4 x half> undef
- %NXV8F16 = fneg <vscale x 8 x half> undef
- %NXV16F16 = fneg <vscale x 16 x half> undef
- %NXV32F16 = fneg <vscale x 32 x half> undef
-
 %V1F32 = fneg <1 x float> undef
 %V2F32 = fneg <2 x float> undef
 %V4F32 = fneg <4 x float> undef
@@ -480,26 +509,49 @@ define i32 @fneg() {
 %NXV4F64 = fneg <vscale x 4 x double> undef
 %NXV8F64 = fneg <vscale x 8 x double> undef
- ret i32 undef
+ ret void
}

-define i32 @fcopysign() {
+define void @fneg_f16() {
+; CHECK-LABEL: 'fneg_f16'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fneg half undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fneg <1 x half> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = fneg <2 x half> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = fneg <4 x half> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = fneg <8 x half> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16 = fneg <16 x half> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32F16 = fneg <32 x half> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 = fneg <vscale x 1 x half> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16 = fneg <vscale x 2 x half> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = fneg <vscale x 4 x half> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16 = fneg <vscale x 8 x half> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16 = fneg <vscale x 16 x half> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fneg <vscale x 32 x half> undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %F16 = fneg half undef
+
+ %V1F16 = fneg <1 x half> undef
+ %V2F16 = fneg <2 x half> undef
+ %V4F16 = fneg <4 x half> undef
+ %V8F16 = fneg <8 x half> undef
+ %V16F16 = fneg <16 x half> undef
+ %V32F16 = fneg <32 x half> undef
+
+ %NXV1F16 = fneg <vscale x 1 x half> undef
+ %NXV2F16 = fneg <vscale x 2 x half> undef
+ %NXV4F16 = fneg <vscale x 4 x half> undef
+ %NXV8F16 = fneg <vscale x 8 x half> undef
+ %NXV16F16 = fneg <vscale x 16 x half> undef
+ %NXV32F16 = fneg <vscale x 32 x half> undef
+
+ ret void
+}
+
+define void @fcopysign() {
; CHECK-LABEL: 'fcopysign'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F16 = call half @llvm.copysign.f16(half undef, half undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F32 = call float @llvm.copysign.f32(float undef, float undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F64 = call double @llvm.copysign.f64(double undef, double undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = call <1 x half> @llvm.copysign.v1f16(<1 x half> undef, <1 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = call <2 x half> @llvm.copysign.v2f16(<2 x half> undef, <2 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = call <4 x half> @llvm.copysign.v4f16(<4 x half> undef, <4 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = call <8 x half> @llvm.copysign.v8f16(<8 x half> undef, <8 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16F16 = call <16 x half> @llvm.copysign.v16f16(<16 x half> undef, <16 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32F16 = call <32 x half> @llvm.copysign.v32f16(<32 x half> undef, <32 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1F16 = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2F16 = call <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV4F16 = call <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV8F16 = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV16F16 = call <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV32F16 = call <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half> undef, <vscale x 32 x half> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = call <1 x float> @llvm.copysign.v1f32(<1 x float> undef, <1 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = call <2 x float> @llvm.copysign.v2f32(<2 x float> undef, <2 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = call <4 x float> @llvm.copysign.v4f32(<4 x float> undef, <4 x float> undef)
@@ -518,26 +570,11 @@ define i32 @fcopysign() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2F64 = call <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV4F64 = call <vscale x 4 x double> @llvm.copysign.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV8F64 = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %F16 = call half @llvm.copysign.f16(half undef, half undef)
 %F32 = call float @llvm.copysign.f32(float undef, float undef)
 %F64 = call double @llvm.copysign.f64(double undef, double undef)
- %V1F16 = call <1 x half> @llvm.copysign.v1f16(<1 x half> undef, <1 x half> undef)
- %V2F16 = call <2 x half> @llvm.copysign.v2f16(<2 x half> undef, <2 x half> undef)
- %V4F16 = call <4 x half> @llvm.copysign.v4f16(<4 x half> undef, <4 x half> undef)
- %V8F16 = call <8 x half> @llvm.copysign.v8f16(<8 x half> undef, <8 x half> undef)
- %V16F16 = call <16 x half> @llvm.copysign.v16f16(<16 x half> undef, <16 x half> undef)
- %V32F16 = call <32 x half> @llvm.copysign.v32f16(<32 x half> undef, <32 x half> undef)
-
- %NXV1F16 = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef)
- %NXV2F16 = call <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef)
- %NXV4F16 = call <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef)
- %NXV8F16 = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef)
- %NXV16F16 = call <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef)
- %NXV32F16 = call <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half> undef, <vscale x 32 x half> undef)
-
 %V1F32 = call <1 x float> @llvm.copysign.v1f32(<1 x float> undef, <1 x float> undef)
 %V2F32 = call <2 x float> @llvm.copysign.v2f32(<2 x float> undef, <2 x float> undef)
 %V4F32 = call <4 x float> @llvm.copysign.v4f32(<4 x float> undef, <4 x float> undef)
@@ -560,26 +597,65 @@ define i32 @fcopysign() {
 %NXV4F64 = call <vscale x 4 x double> @llvm.copysign.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
 %NXV8F64 = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
- ret i32 undef
+ ret void
+}
+
+define void @fcopysign_f16() {
+; ZVFH-LABEL: 'fcopysign_f16'
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F16 = call half @llvm.copysign.f16(half undef, half undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = call <1 x half> @llvm.copysign.v1f16(<1 x half> undef, <1 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = call <2 x half> @llvm.copysign.v2f16(<2 x half> undef, <2 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = call <4 x half> @llvm.copysign.v4f16(<4 x half> undef, <4 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = call <8 x half> @llvm.copysign.v8f16(<8 x half> undef, <8 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16F16 = call <16 x half> @llvm.copysign.v16f16(<16 x half> undef, <16 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32F16 = call <32 x half> @llvm.copysign.v32f16(<32 x half> undef, <32 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1F16 = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2F16 = call <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV4F16 = call <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV8F16 = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV16F16 = call <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV32F16 = call <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half> undef, <vscale x 32 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; ZVFHMIN-LABEL: 'fcopysign_f16'
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F16 = call half @llvm.copysign.f16(half undef, half undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V1F16 = call <1 x half> @llvm.copysign.v1f16(<1 x half> undef, <1 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V2F16 = call <2 x half> @llvm.copysign.v2f16(<2 x half> undef, <2 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %V4F16 = call <4 x half> @llvm.copysign.v4f16(<4 x half> undef, <4 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %V8F16 = call <8 x half> @llvm.copysign.v8f16(<8 x half> undef, <8 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %V16F16 = call <16 x half> @llvm.copysign.v16f16(<16 x half> undef, <16 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 95 for instruction: %V32F16 = call <32 x half> @llvm.copysign.v32f16(<32 x half> undef, <32 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16 = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16 = call <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16 = call <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16 = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16 = call <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV32F16 = call <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half> undef, <vscale x 32 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %F16 = call half @llvm.copysign.f16(half undef, half undef)
+
+ %V1F16 = call <1 x half> @llvm.copysign.v1f16(<1 x half> undef, <1 x half> undef)
+ %V2F16 = call <2 x half> @llvm.copysign.v2f16(<2 x half> undef, <2 x half> undef)
+ %V4F16 = call <4 x half> @llvm.copysign.v4f16(<4 x half> undef, <4 x half> undef)
+ %V8F16 = call <8 x half> @llvm.copysign.v8f16(<8 x half> undef, <8 x half> undef)
+ %V16F16 = call <16 x half> @llvm.copysign.v16f16(<16 x half> undef, <16 x half> undef)
+ %V32F16 = call <32 x half> @llvm.copysign.v32f16(<32 x half> undef, <32 x half> undef)
+
+ %NXV1F16 = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef)
+ %NXV2F16 = call <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef)
+ %NXV4F16 = call <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef)
+ %NXV8F16 = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef)
+ %NXV16F16 = call <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef)
+ %NXV32F16 = call <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half> undef, <vscale x 32 x half> undef)
+
+ ret void
}

-define i32 @fma() {
+define void @fma() {
; CHECK-LABEL: 'fma'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F16 = call half @llvm.fma.f16(half undef, half undef, half undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F32 = call float @llvm.fma.f32(float undef, float undef, float undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F64 = call double @llvm.fma.f64(double undef, double undef, double undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = call <1 x half> @llvm.fma.v1f16(<1 x half> undef, <1 x half> undef, <1 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = call <2 x half> @llvm.fma.v2f16(<2 x half> undef, <2 x half> undef, <2 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = call <4 x half> @llvm.fma.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = call <8 x half> @llvm.fma.v8f16(<8 x half> undef, <8 x half> undef, <8 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16F16 = call <16 x half> @llvm.fma.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32F16 = call <32 x half> @llvm.fma.v32f16(<32 x half> undef, <32 x half> undef, <32 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1F16 = call <vscale x 1 x half> @llvm.fma.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2F16 = call <vscale x 2 x half> @llvm.fma.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV4F16 = call <vscale x 4 x half> @llvm.fma.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV8F16 = call <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV16F16 = call <vscale x 16 x half> @llvm.fma.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV32F16 = call <vscale x 32 x half> @llvm.fma.nxv32f16(<vscale x 32 x half> undef, <vscale x 32 x half> undef, <vscale x 32 x half> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = call <1 x float> @llvm.fma.v1f32(<1 x float> undef, <1 x float> undef, <1 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = call <2 x float> @llvm.fma.v2f32(<2 x float> undef, <2 x float> undef, <2 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = call <4 x float> @llvm.fma.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef)
@@ -598,26 +674,11 @@ define i32 @fma() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2F64 = call <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV4F64 = call <vscale x 4 x double> @llvm.fma.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV8F64 = call <vscale x 8 x double> @llvm.fma.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %F16 = call half @llvm.fma.f16(half undef, half undef, half undef)
 %F32 = call float @llvm.fma.f32(float undef, float undef, float undef)
 %F64 = call double @llvm.fma.f64(double undef, double undef, double undef)
- %V1F16 = call <1 x half> @llvm.fma.v1f16(<1 x half> undef, <1 x half> undef, <1 x half> undef)
- %V2F16 = call <2 x half> @llvm.fma.v2f16(<2 x half> undef, <2 x half> undef, <2 x half> undef)
- %V4F16 = call <4 x half> @llvm.fma.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef)
- %V8F16 = call <8 x half> @llvm.fma.v8f16(<8 x half> undef, <8 x half> undef, <8 x half> undef)
- %V16F16 = call <16 x half> @llvm.fma.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef)
- %V32F16 = call <32 x half> @llvm.fma.v32f16(<32 x half> undef, <32 x half> undef, <32 x half> undef)
-
- %NXV1F16 = call <vscale x 1 x half> @llvm.fma.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef)
- %NXV2F16 = call <vscale x 2 x half> @llvm.fma.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef)
- %NXV4F16 = call <vscale x 4 x half> @llvm.fma.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
- %NXV8F16 = call <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
- %NXV16F16 = call <vscale x 16 x half> @llvm.fma.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
- %NXV32F16 = call <vscale x 32 x half> @llvm.fma.nxv32f16(<vscale x 32 x half> undef, <vscale x 32 x half> undef, <vscale x 32 x half> undef)
-
 %V1F32 = call <1 x float> @llvm.fma.v1f32(<1 x float> undef, <1 x float> undef, <1 x float> undef)
 %V2F32 = call <2 x float> @llvm.fma.v2f32(<2 x float> undef, <2 x float> undef, <2 x float> undef)
 %V4F32 = call <4 x float> @llvm.fma.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef)
@@ -640,50 +701,87 @@ define i32 @fma() {
 %NXV4F64 = call <vscale x 4 x double> @llvm.fma.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef)
 %NXV8F64 = call <vscale x 8 x double> @llvm.fma.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x double> undef)
- ret i32 undef
+ ret void
+}
+
+define void @fma_f16() {
+; ZVFH-LABEL: 'fma_f16'
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F16 = call half @llvm.fma.f16(half undef, half undef, half undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = call <1 x half> @llvm.fma.v1f16(<1 x half> undef, <1 x half> undef, <1 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = call <2 x half> @llvm.fma.v2f16(<2 x half> undef, <2 x half> undef, <2 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = call <4 x half> @llvm.fma.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = call <8 x half> @llvm.fma.v8f16(<8 x half> undef, <8 x half> undef, <8 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16F16 = call <16 x half> @llvm.fma.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32F16 = call <32 x half> @llvm.fma.v32f16(<32 x half> undef, <32 x half> undef, <32 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1F16 = call <vscale x 1 x half> @llvm.fma.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2F16 = call <vscale x 2 x half> @llvm.fma.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV4F16 = call <vscale x 4 x half> @llvm.fma.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV8F16 = call <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV16F16 = call <vscale x 16 x half> @llvm.fma.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV32F16 = call <vscale x 32 x half> @llvm.fma.nxv32f16(<vscale x 32 x half> undef, <vscale x 32 x half> undef, <vscale x 32 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; ZVFHMIN-LABEL: 'fma_f16'
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F16 = call half @llvm.fma.f16(half undef, half undef, half undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V1F16 = call <1 x half> @llvm.fma.v1f16(<1 x half> undef, <1 x half> undef, <1 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2F16 = call <2 x half> @llvm.fma.v2f16(<2 x half> undef, <2 x half> undef, <2 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4F16 = call <4 x half> @llvm.fma.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8F16 = call <8 x half> @llvm.fma.v8f16(<8 x half> undef, <8 x half> undef, <8 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V16F16 = call <16 x half> @llvm.fma.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V32F16 = call <32 x half> @llvm.fma.v32f16(<32 x half> undef, <32 x half> undef, <32 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1F16 = call <vscale x 1 x half> @llvm.fma.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2F16 = call <vscale x 2 x half> @llvm.fma.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV4F16 = call <vscale x 4 x half> @llvm.fma.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV8F16 = call <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV16F16 = call <vscale x 16 x half> @llvm.fma.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV32F16 = call <vscale x 32 x half> @llvm.fma.nxv32f16(<vscale x 32 x half> undef, <vscale x 32 x half> undef, <vscale x 32 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %F16 = call half @llvm.fma.f16(half undef, half undef, half undef)
+
+ %V1F16 = call <1 x half> @llvm.fma.v1f16(<1 x half> undef, <1 x half> undef, <1 x half> undef)
+ %V2F16 = call <2 x half> @llvm.fma.v2f16(<2 x half> undef, <2 x half> undef, <2 x half> undef)
+ %V4F16 = call <4 x half> @llvm.fma.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef)
+ %V8F16 = call <8 x half> @llvm.fma.v8f16(<8 x half> undef, <8 x half> undef, <8 x half> undef)
+ %V16F16 = call <16 x half> @llvm.fma.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef)
+ %V32F16 = call <32 x half> @llvm.fma.v32f16(<32 x half> undef, <32 x half> undef, <32 x half> undef)
+
+ %NXV1F16 = call <vscale x 1 x half> @llvm.fma.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef)
+ %NXV2F16 = call <vscale x 2 x half> @llvm.fma.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef)
+ %NXV4F16 = call <vscale x 4 x half> @llvm.fma.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
+ %NXV8F16 = call <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
+ %NXV16F16 = call <vscale x 16 x half> @llvm.fma.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
+ %NXV32F16 = call <vscale x 32 x half> @llvm.fma.nxv32f16(<vscale x 32 x half> undef, <vscale x 32 x half> undef, <vscale x 32 x half> undef)
+
+ ret void
}

define void @fmuladd() {
; CHECK-LABEL: 'fmuladd'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.fmuladd.f16(half undef, half undef, half undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call float @llvm.fmuladd.f32(float undef, float undef, float undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call double @llvm.fmuladd.f64(double undef, double undef, double undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <2 x half> @llvm.fmuladd.v2f16(<2 x half> undef, <2 x half> undef, <2 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <4 x half> @llvm.fmuladd.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call <8 x half> @llvm.fmuladd.v8f16(<8 x half> undef, <8 x half> undef, <8 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call <16 x half> @llvm.fmuladd.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> undef, <2 x float> undef, <2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> undef, <8 x float> undef, <8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %11 = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> undef, <16 x float> undef, <16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %12 = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> undef, <2 x double> undef, <2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %13 = call <4 x double> @llvm.fmuladd.v4f64(<4 x double> undef, <4 x double> undef, <4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %14 = call <8 x double> @llvm.fmuladd.v8f64(<8 x double> undef, <8 x double> undef, <8 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %15 = call <16 x double> @llvm.fmuladd.v16f64(<16 x double> undef, <16 x double> undef, <16 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x half> @llvm.fmuladd.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x half> @llvm.fmuladd.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x half> @llvm.fmuladd.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %20 = call <vscale x 16 x half> @llvm.fmuladd.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call <vscale x 1 x float> @llvm.fmuladd.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <vscale x 2 x float> @llvm.fmuladd.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %24 = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %25 = call <vscale x 16 x float> @llvm.fmuladd.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef, <vscale x 16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x double> @llvm.fmuladd.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 2 x double> @llvm.fmuladd.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %28 = call <vscale x 4 x double> @llvm.fmuladd.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %29 = call <vscale x 8 x double> @llvm.fmuladd.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %30 = call <vscale x 16 x double> @llvm.fmuladd.nxv16f64(<vscale x 16 x double> undef, <vscale x 16 x double> undef, <vscale x 16 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call float @llvm.fmuladd.f32(float undef, float undef, float undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call double @llvm.fmuladd.f64(double undef, double undef, double undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> undef, <2 x float> undef, <2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> undef, <8 x float> undef, <8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> undef, <16 x float> undef, <16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> undef, <2 x double> undef, <2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <4 x double> @llvm.fmuladd.v4f64(<4 x double> undef, <4 x double> undef, <4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <8 x double> @llvm.fmuladd.v8f64(<8 x double> undef, <8 x double> undef, <8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <16 x double> @llvm.fmuladd.v16f64(<16 x double> undef, <16 x double> undef, <16 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call <vscale x 1 x float> @llvm.fmuladd.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <vscale x 2 x float> @llvm.fmuladd.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <vscale x 16 x float> @llvm.fmuladd.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef, <vscale x 16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x double> @llvm.fmuladd.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x double> @llvm.fmuladd.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x double> @llvm.fmuladd.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call <vscale x 8 x double> @llvm.fmuladd.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %20 = call <vscale x 16 x double> @llvm.fmuladd.nxv16f64(<vscale x 16 x double> undef, <vscale x 16 x double> undef, <vscale x 16 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- call half @llvm.fmuladd.f16(half undef, half undef, half undef)
 call float @llvm.fmuladd.f32(float undef, float undef, float undef)
 call double @llvm.fmuladd.f64(double undef, double undef, double undef)
- call <2 x half> @llvm.fmuladd.v2f16(<2 x half> undef, <2 x half> undef, <2 x half> undef)
- call <4 x half> @llvm.fmuladd.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef)
- call <8 x half> @llvm.fmuladd.v8f16(<8 x half> undef, <8 x half> undef, <8 x half> undef)
- call <16 x half> @llvm.fmuladd.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef)
 call <2 x float> @llvm.fmuladd.v2f32(<2 x float> undef, <2 x float> undef, <2 x float> undef)
 call <4 x float> @llvm.fmuladd.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef)
 call <8 x float> @llvm.fmuladd.v8f32(<8 x float> undef, <8 x float> undef, <8 x float> undef)
@@ -692,11 +790,6 @@ define void @fmuladd() {
 call <4 x double> @llvm.fmuladd.v4f64(<4 x double> undef, <4 x double> undef, <4 x double> undef)
 call <8 x double> @llvm.fmuladd.v8f64(<8 x double> undef, <8 x double> undef, <8 x double> undef)
 call <16 x double> @llvm.fmuladd.v16f64(<16 x double> undef, <16 x double> undef, <16 x double> undef)
- call <vscale x 1 x half> @llvm.fmuladd.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef)
- call <vscale x 2 x half> @llvm.fmuladd.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef)
- call <vscale x 4 x half> @llvm.fmuladd.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
- call <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
- call <vscale x 16 x half> @llvm.fmuladd.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
 call <vscale x 1 x float> @llvm.fmuladd.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef)
 call <vscale x 2 x float> @llvm.fmuladd.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef)
 call <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef)
@@ -710,113 +803,42 @@ define void @fmuladd() {
 ret void
}

-declare half @llvm.copysign.f16(half, half)
-declare float @llvm.copysign.f32(float, float)
-declare double @llvm.copysign.f64(double, double)
-
-declare <1 x half> @llvm.copysign.v1f16(<1 x half>, <1 x half>)
-declare <2 x half> @llvm.copysign.v2f16(<2 x half>, <2 x half>)
-declare <4 x half> @llvm.copysign.v4f16(<4 x half>, <4 x half>)
-declare <8 x half> @llvm.copysign.v8f16(<8 x half>, <8 x half>)
-declare <16 x half> @llvm.copysign.v16f16(<16 x half>, <16 x half>)
-declare <32 x half> @llvm.copysign.v32f16(<32 x half>, <32 x half>)
-
-declare <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>)
-declare <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>)
-declare <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>)
-declare <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
-declare <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>)
-declare <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>)
-
-declare <1 x float> @llvm.copysign.v1f32(<1 x float>, <1 x float>)
-declare <2 x float> @llvm.copysign.v2f32(<2 x float>, <2 x float>)
-declare <4 x float> @llvm.copysign.v4f32(<4 x float>, <4 x float>)
-declare <8 x float> @llvm.copysign.v8f32(<8 x float>, <8 x float>)
-declare <16 x float> @llvm.copysign.v16f32(<16 x float>, <16 x float>)
-
-declare <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>)
-declare <vscale x 2 x float> @llvm.copysign.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>)
-declare <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
-declare <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>)
-declare <vscale x 16 x float> @llvm.copysign.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>)
-
-declare <1 x double> @llvm.copysign.v1f64(<1 x double>, <1 x double>)
-declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>)
-declare <4 x double> @llvm.copysign.v4f64(<4 x double>, <4 x double>)
-declare <8 x double> @llvm.copysign.v8f64(<8 x double>, <8 x double>)
-
-declare <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>)
-declare <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.copysign.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>)
-declare <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>)
-
-declare half @llvm.fma.f16(half, half, half)
-declare float @llvm.fma.f32(float, float, float)
-declare double @llvm.fma.f64(double, double, double)
-
-declare <1 x half> @llvm.fma.v1f16(<1 x half>, <1 x half>, <1 x half>)
-declare <2 x half> @llvm.fma.v2f16(<2 x half>, <2 x half>, <2 x half>)
-declare <4 x half> @llvm.fma.v4f16(<4 x half>, <4 x half>, <4 x half>)
-declare <8 x half> @llvm.fma.v8f16(<8 x half>, <8 x half>, <8 x half>)
-declare <16 x half> @llvm.fma.v16f16(<16 x half>, <16 x half>, <16 x half>)
-declare <32 x half> @llvm.fma.v32f16(<32 x half>, <32 x half>, <32 x half>)
-
-declare <vscale x 1 x half> @llvm.fma.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>)
-declare <vscale x 2 x half> @llvm.fma.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>)
-declare <vscale x 4 x half> @llvm.fma.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>)
-declare <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
-declare <vscale x 16 x half> @llvm.fma.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>)
-declare <vscale x 32 x half> @llvm.fma.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x half>)
-
-declare <1 x float> @llvm.fma.v1f32(<1 x float>, <1 x float>, <1 x float>)
-declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>)
-declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
-declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>)
-declare <16 x float> @llvm.fma.v16f32(<16 x float>, <16 x float>, <16 x float>)
-
-declare <vscale x 1 x float> @llvm.fma.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>)
-declare <vscale x 2 x float> @llvm.fma.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>)
-declare <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
-declare <vscale x 8 x float> @llvm.fma.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>)
-declare <vscale x 16 x float> @llvm.fma.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>)
-
-declare <1 x double> @llvm.fma.v1f64(<1 x double>, <1 x double>, <1 x double>)
-declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)
-declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>)
-declare <8 x double> @llvm.fma.v8f64(<8 x double>, <8 x double>, <8 x double>)
-
-declare <vscale x 1 x double> @llvm.fma.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>)
-declare <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.fma.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>)
-declare <vscale x 8 x double> @llvm.fma.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>)
-
-declare half @llvm.fmuladd.f16(half, half, half)
-declare float @llvm.fmuladd.f32(float, float, float)
-declare double @llvm.fmuladd.f64(double, double, double)
-declare <2 x half> @llvm.fmuladd.v2f16(<2 x half>, <2 x half>, <2 x half>)
-declare <4 x half> @llvm.fmuladd.v4f16(<4 x half>, <4 x half>, <4 x half>)
-declare <8 x half> @llvm.fmuladd.v8f16(<8 x half>, <8 x half>, <8 x half>)
-declare <16 x half> @llvm.fmuladd.v16f16(<16 x half>, <16 x half>, <16 x half>)
-declare <2 x float> @llvm.fmuladd.v2f32(<2 x float>, <2 x float>, <2 x float>)
-declare <4 x float> @llvm.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>)
-declare <8 x float> @llvm.fmuladd.v8f32(<8 x float>, <8 x float>, <8 x float>)
-declare <16 x float> @llvm.fmuladd.v16f32(<16 x float>, <16 x float>, <16 x float>)
-declare <2 x double> @llvm.fmuladd.v2f64(<2 x double>, <2 x double>, <2 x double>)
-declare <4 x double> @llvm.fmuladd.v4f64(<4 x double>, <4 x double>, <4 x double>)
-declare <8 x double> @llvm.fmuladd.v8f64(<8 x double>, <8 x double>, <8 x double>)
-declare <16 x double> @llvm.fmuladd.v16f64(<16 x double>, <16 x double>, <16 x double>)
-declare <vscale x 1 x half> @llvm.fmuladd.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>)
-declare <vscale x 2 x half> @llvm.fmuladd.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>)
-declare <vscale x 4 x half> @llvm.fmuladd.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>)
-declare <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
-declare <vscale x 16 x half> @llvm.fmuladd.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>)
-declare <vscale x 1 x float> @llvm.fmuladd.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>)
-declare <vscale x 2 x float> @llvm.fmuladd.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>)
-declare <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
-declare <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>)
-declare <vscale x 16 x float> @llvm.fmuladd.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>)
-declare <vscale x 1 x double> @llvm.fmuladd.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>)
-declare <vscale x 2 x double> @llvm.fmuladd.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.fmuladd.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>)
-declare <vscale x 8 x double> @llvm.fmuladd.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>)
-declare <vscale x 16 x double> @llvm.fmuladd.nxv16f64(<vscale x 16 x double>, <vscale x 16 x double>, <vscale x 16 x double>)
+define void @fmuladd_f16() {
+; ZVFH-LABEL: 'fmuladd_f16'
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.fmuladd.f16(half undef, half undef, half undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = call <2 x half> @llvm.fmuladd.v2f16(<2 x half> undef, <2 x half> undef, <2 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x half> @llvm.fmuladd.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <8 x half> @llvm.fmuladd.v8f16(<8 x half> undef, <8 x half> undef, <8 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x half> @llvm.fmuladd.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x half> @llvm.fmuladd.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x half> @llvm.fmuladd.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x half> @llvm.fmuladd.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 16 x half> @llvm.fmuladd.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; ZVFHMIN-LABEL: 'fmuladd_f16'
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.fmuladd.f16(half undef, half undef, half undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x half> @llvm.fmuladd.v2f16(<2 x half> undef, <2 x half> undef, <2 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x half> @llvm.fmuladd.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x half> @llvm.fmuladd.v8f16(<8 x half> undef, <8 x half> undef, <8 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x half> @llvm.fmuladd.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x half> @llvm.fmuladd.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x half> @llvm.fmuladd.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x half> @llvm.fmuladd.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x
half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 16 x half> @llvm.fmuladd.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call half @llvm.fmuladd.f16(half undef, half undef, half undef) + call <2 x half> @llvm.fmuladd.v2f16(<2 x half> undef, <2 x half> undef, <2 x half> undef) + call <4 x half> @llvm.fmuladd.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef) + call <8 x half> @llvm.fmuladd.v8f16(<8 x half> undef, <8 x half> undef, <8 x half> undef) + call <16 x half> @llvm.fmuladd.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef) + call <vscale x 1 x half> @llvm.fmuladd.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef) + call <vscale x 2 x half> @llvm.fmuladd.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef) + call <vscale x 4 x half> @llvm.fmuladd.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef) + call <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef) + call <vscale x 16 x half> @llvm.fmuladd.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef) + ret void +} diff --git a/llvm/test/Analysis/CostModel/RISCV/fp-min-max-abs.ll b/llvm/test/Analysis/CostModel/RISCV/fp-min-max-abs.ll index f8fe84c..64dc264 100644 --- a/llvm/test/Analysis/CostModel/RISCV/fp-min-max-abs.ll +++ b/llvm/test/Analysis/CostModel/RISCV/fp-min-max-abs.ll @@ -1,48 +1,30 @@ ; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py -; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-min=-1 | FileCheck %s +; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfh | FileCheck %s --check-prefixes=CHECK,ZVFH +; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfhmin | FileCheck %s --check-prefixes=CHECK,ZVFHMIN define void @fabs() { ; CHECK-LABEL: 'fabs' -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.fabs.f16(half undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x half> @llvm.fabs.v2f16(<2 x half> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x half> @llvm.fabs.v4f16(<4 x half> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x half> @llvm.fabs.v8f16(<8 x half> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x half> @llvm.fabs.v16f16(<16 x half> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x half> @llvm.fabs.nxv2f16(<vscale x 2 x half> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x half> @llvm.fabs.nxv4f16(<vscale x 4 x half> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x half> 
@llvm.fabs.nxv8f16(<vscale x 8 x half> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 16 x half> @llvm.fabs.nxv16f16(<vscale x 16 x half> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call float @llvm.fabs.f32(float undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call <2 x float> @llvm.fabs.v2f32(<2 x float> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <4 x float> @llvm.fabs.v4f32(<4 x float> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <8 x float> @llvm.fabs.v8f32(<8 x float> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <16 x float> @llvm.fabs.v16f32(<16 x float> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <vscale x 1 x float> @llvm.fabs.nxv1f32(<vscale x 1 x float> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 2 x float> @llvm.fabs.nxv2f32(<vscale x 2 x float> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 8 x float> @llvm.fabs.nxv8f32(<vscale x 8 x float> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call <vscale x 16 x float> @llvm.fabs.nxv16f32(<vscale x 16 x float> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %20 = call double @llvm.fabs.f64(double undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call <2 x double> @llvm.fabs.v2f64(<2 x double> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <4 x double> @llvm.fabs.v4f64(<4 x double> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <8 x double> @llvm.fabs.v8f64(<8 x double> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %24 = call <16 x double> @llvm.fabs.v16f64(<16 x double> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %25 = call <vscale x 1 x double> @llvm.fabs.nxv1f64(<vscale x 1 x double> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 4 x double> @llvm.fabs.nxv4f64(<vscale x 4 x double> undef) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %28 = call <vscale x 8 x double> @llvm.fabs.nxv8f64(<vscale x 8 x double> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call float @llvm.fabs.f32(float undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x float> @llvm.fabs.v2f32(<2 x float> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x float> @llvm.fabs.v4f32(<4 x float> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x float> @llvm.fabs.v8f32(<8 x float> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x float> @llvm.fabs.v16f32(<16 x float> undef) +; CHECK-NEXT: Cost 
Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x float> @llvm.fabs.nxv1f32(<vscale x 1 x float> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x float> @llvm.fabs.nxv2f32(<vscale x 2 x float> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x float> @llvm.fabs.nxv8f32(<vscale x 8 x float> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 16 x float> @llvm.fabs.nxv16f32(<vscale x 16 x float> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call double @llvm.fabs.f64(double undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x double> @llvm.fabs.v2f64(<2 x double> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x double> @llvm.fabs.v4f64(<4 x double> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <8 x double> @llvm.fabs.v8f64(<8 x double> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <16 x double> @llvm.fabs.v16f64(<16 x double> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x double> @llvm.fabs.nxv1f64(<vscale x 1 x double> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x double> @llvm.fabs.nxv4f64(<vscale x 4 x double> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call <vscale x 8 x double> @llvm.fabs.nxv8f64(<vscale x 8 x double> undef) ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; - - call half @llvm.fabs.f16(half undef) - call <2 x half> @llvm.fabs.v2f16(<2 x half> undef) - call <4 x half> @llvm.fabs.v4f16(<4 x half> undef) - call <8 x half> @llvm.fabs.v8f16(<8 x half> undef) - call <16 x half> @llvm.fabs.v16f16(<16 x half> undef) - call <vscale x 2 x half> @llvm.fabs.nvx2f16(<vscale x 2 x half> undef) - call <vscale x 4 x half> @llvm.fabs.nvx4f16(<vscale x 4 x half> undef) - call <vscale x 8 x half> @llvm.fabs.nvx8f16(<vscale x 8 x half> undef) - call <vscale x 16 x half> @llvm.fabs.nvx16f16(<vscale x 16 x half> undef) call float @llvm.fabs.f32(float undef) call <2 x float> @llvm.fabs.v2f32(<2 x float> undef) call <4 x float> @llvm.fabs.v4f32(<4 x float> undef) @@ -65,6 +47,31 @@ define void @fabs() { ret void } +define void @fabs_f16() { +; CHECK-LABEL: 'fabs_f16' +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.fabs.f16(half undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x half> @llvm.fabs.v2f16(<2 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x half> @llvm.fabs.v4f16(<4 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x half> @llvm.fabs.v8f16(<8 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x half> @llvm.fabs.v16f16(<16 x half> undef) +; CHECK-NEXT: Cost 
Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x half> @llvm.fabs.nxv2f16(<vscale x 2 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x half> @llvm.fabs.nxv4f16(<vscale x 4 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x half> @llvm.fabs.nxv8f16(<vscale x 8 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 16 x half> @llvm.fabs.nxv16f16(<vscale x 16 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call half @llvm.fabs.f16(half undef) + call <2 x half> @llvm.fabs.v2f16(<2 x half> undef) + call <4 x half> @llvm.fabs.v4f16(<4 x half> undef) + call <8 x half> @llvm.fabs.v8f16(<8 x half> undef) + call <16 x half> @llvm.fabs.v16f16(<16 x half> undef) + call <vscale x 2 x half> @llvm.fabs.nvx2f16(<vscale x 2 x half> undef) + call <vscale x 4 x half> @llvm.fabs.nvx4f16(<vscale x 4 x half> undef) + call <vscale x 8 x half> @llvm.fabs.nvx8f16(<vscale x 8 x half> undef) + call <vscale x 16 x half> @llvm.fabs.nvx16f16(<vscale x 16 x half> undef) + ret void +} + define void @minnum() { ; CHECK-LABEL: 'minnum' ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call float @llvm.minnum.f32(float undef, float undef) @@ -110,6 +117,46 @@ define void @minnum() { ret void } +define void @minnum_f16() { +; ZVFH-LABEL: 'minnum_f16' +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.minnum.f16(half undef, half undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = call <2 x half> @llvm.minnum.v2f16(<2 x half> undef, <2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x half> @llvm.minnum.v4f16(<4 x half> undef, <4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <8 x half> @llvm.minnum.v8f16(<8 x half> undef, <8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x half> @llvm.minnum.v16f16(<16 x half> undef, <16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x half> @llvm.minnum.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x half> @llvm.minnum.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x half> @llvm.minnum.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x half> @llvm.minnum.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 16 x half> @llvm.minnum.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; ZVFHMIN-LABEL: 'minnum_f16' +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.minnum.f16(half undef, half undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x half> @llvm.minnum.v2f16(<2 x half> undef, <2 x half> undef) +; ZVFHMIN-NEXT: Cost 
Model: Found an estimated cost of 1 for instruction: %3 = call <4 x half> @llvm.minnum.v4f16(<4 x half> undef, <4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x half> @llvm.minnum.v8f16(<8 x half> undef, <8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x half> @llvm.minnum.v16f16(<16 x half> undef, <16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x half> @llvm.minnum.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x half> @llvm.minnum.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x half> @llvm.minnum.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x half> @llvm.minnum.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 16 x half> @llvm.minnum.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call half @llvm.minnum.f16(half undef, half undef) + call <2 x half> @llvm.minnum.v2f16(<2 x half> undef, <2 x half> undef) + call <4 x half> @llvm.minnum.v4f16(<4 x half> undef, <4 x half> undef) + call <8 x half> @llvm.minnum.v8f16(<8 x half> undef, <8 x half> undef) + call <16 x half> @llvm.minnum.v16f16(<16 x half> undef, <16 x half> undef) + call <vscale x 1 x half> @llvm.minnum.nvx1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef) + call <vscale x 2 x half> @llvm.minnum.nvx2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef) + call <vscale x 4 x half> @llvm.minnum.nvx4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef) + call <vscale x 8 x half> @llvm.minnum.nvx8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef) + call <vscale x 16 x half> @llvm.minnum.nvx16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef) + ret void +} + define void @maxnum() { ; CHECK-LABEL: 'maxnum' ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call float @llvm.maxnum.f32(float undef, float undef) @@ -155,6 +202,46 @@ define void @maxnum() { ret void } +define void @maxnum_f16() { +; ZVFH-LABEL: 'maxnum_f16' +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.maxnum.f16(half undef, half undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = call <2 x half> @llvm.maxnum.v2f16(<2 x half> undef, <2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x half> @llvm.maxnum.v4f16(<4 x half> undef, <4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <8 x half> @llvm.maxnum.v8f16(<8 x half> undef, <8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x half> @llvm.maxnum.v16f16(<16 x half> undef, <16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x half> @llvm.maxnum.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 
for instruction: %7 = call <vscale x 2 x half> @llvm.maxnum.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x half> @llvm.maxnum.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x half> @llvm.maxnum.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 16 x half> @llvm.maxnum.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; ZVFHMIN-LABEL: 'maxnum_f16' +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.maxnum.f16(half undef, half undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x half> @llvm.maxnum.v2f16(<2 x half> undef, <2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x half> @llvm.maxnum.v4f16(<4 x half> undef, <4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x half> @llvm.maxnum.v8f16(<8 x half> undef, <8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x half> @llvm.maxnum.v16f16(<16 x half> undef, <16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x half> @llvm.maxnum.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x half> @llvm.maxnum.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x half> @llvm.maxnum.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x half> @llvm.maxnum.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 16 x half> @llvm.maxnum.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call half @llvm.maxnum.f16(half undef, half undef) + call <2 x half> @llvm.maxnum.v2f16(<2 x half> undef, <2 x half> undef) + call <4 x half> @llvm.maxnum.v4f16(<4 x half> undef, <4 x half> undef) + call <8 x half> @llvm.maxnum.v8f16(<8 x half> undef, <8 x half> undef) + call <16 x half> @llvm.maxnum.v16f16(<16 x half> undef, <16 x half> undef) + call <vscale x 1 x half> @llvm.maxnum.nvx1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef) + call <vscale x 2 x half> @llvm.maxnum.nvx2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef) + call <vscale x 4 x half> @llvm.maxnum.nvx4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef) + call <vscale x 8 x half> @llvm.maxnum.nvx8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef) + call <vscale x 16 x half> @llvm.maxnum.nvx16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef) + ret void +} + define void @minimum() { ; CHECK-LABEL: 'minimum' ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call float @llvm.minimum.f32(float undef, 
float undef) @@ -200,6 +287,46 @@ define void @minimum() { ret void } +define void @minimum_f16() { +; ZVFH-LABEL: 'minimum_f16' +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call half @llvm.minimum.f16(half undef, half undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = call <2 x half> @llvm.minimum.v2f16(<2 x half> undef, <2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x half> @llvm.minimum.v4f16(<4 x half> undef, <4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <8 x half> @llvm.minimum.v8f16(<8 x half> undef, <8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x half> @llvm.minimum.v16f16(<16 x half> undef, <16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call <vscale x 1 x half> @llvm.minimum.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call <vscale x 2 x half> @llvm.minimum.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <vscale x 4 x half> @llvm.minimum.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 8 x half> @llvm.minimum.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x half> @llvm.minimum.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; ZVFHMIN-LABEL: 'minimum_f16' +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call half @llvm.minimum.f16(half undef, half undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x half> @llvm.minimum.v2f16(<2 x half> undef, <2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x half> @llvm.minimum.v4f16(<4 x half> undef, <4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x half> @llvm.minimum.v8f16(<8 x half> undef, <8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x half> @llvm.minimum.v16f16(<16 x half> undef, <16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x half> @llvm.minimum.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x half> @llvm.minimum.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x half> @llvm.minimum.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x half> @llvm.minimum.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 16 x half> @llvm.minimum.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: 
Found an estimated cost of 0 for instruction: ret void +; + call half @llvm.minimum.f16(half undef, half undef) + call <2 x half> @llvm.minimum.v2f16(<2 x half> undef, <2 x half> undef) + call <4 x half> @llvm.minimum.v4f16(<4 x half> undef, <4 x half> undef) + call <8 x half> @llvm.minimum.v8f16(<8 x half> undef, <8 x half> undef) + call <16 x half> @llvm.minimum.v16f16(<16 x half> undef, <16 x half> undef) + call <vscale x 1 x half> @llvm.minimum.nvx1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef) + call <vscale x 2 x half> @llvm.minimum.nvx2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef) + call <vscale x 4 x half> @llvm.minimum.nvx4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef) + call <vscale x 8 x half> @llvm.minimum.nvx8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef) + call <vscale x 16 x half> @llvm.minimum.nvx16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef) + ret void +} + define void @maximum() { ; CHECK-LABEL: 'maximum' ; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call float @llvm.maximum.f32(float undef, float undef) @@ -245,6 +372,46 @@ define void @maximum() { ret void } +define void @maximum_f16() { +; ZVFH-LABEL: 'maximum_f16' +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call half @llvm.maximum.f16(half undef, half undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = call <2 x half> @llvm.maximum.v2f16(<2 x half> undef, <2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x half> @llvm.maximum.v4f16(<4 x half> undef, <4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <8 x half> @llvm.maximum.v8f16(<8 x half> undef, <8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x half> @llvm.maximum.v16f16(<16 x half> undef, <16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call <vscale x 1 x half> @llvm.maximum.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call <vscale x 2 x half> @llvm.maximum.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <vscale x 4 x half> @llvm.maximum.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 8 x half> @llvm.maximum.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x half> @llvm.maximum.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; ZVFHMIN-LABEL: 'maximum_f16' +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call half @llvm.maximum.f16(half undef, half undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x half> @llvm.maximum.v2f16(<2 x half> undef, <2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x half> @llvm.maximum.v4f16(<4 x half> undef, <4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x half> @llvm.maximum.v8f16(<8 x half> undef, 
<8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x half> @llvm.maximum.v16f16(<16 x half> undef, <16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x half> @llvm.maximum.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x half> @llvm.maximum.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x half> @llvm.maximum.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x half> @llvm.maximum.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 16 x half> @llvm.maximum.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call half @llvm.maximum.f16(half undef, half undef) + call <2 x half> @llvm.maximum.v2f16(<2 x half> undef, <2 x half> undef) + call <4 x half> @llvm.maximum.v4f16(<4 x half> undef, <4 x half> undef) + call <8 x half> @llvm.maximum.v8f16(<8 x half> undef, <8 x half> undef) + call <16 x half> @llvm.maximum.v16f16(<16 x half> undef, <16 x half> undef) + call <vscale x 1 x half> @llvm.maximum.nvx1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef) + call <vscale x 2 x half> @llvm.maximum.nvx2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef) + call <vscale x 4 x half> @llvm.maximum.nvx4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef) + call <vscale x 8 x half> @llvm.maximum.nvx8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef) + call <vscale x 16 x half> @llvm.maximum.nvx16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef) + ret void +} + define void @copysign() { ; CHECK-LABEL: 'copysign' ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call float @llvm.copysign.f32(float undef, float undef) @@ -290,131 +457,42 @@ define void @copysign() { ret void } -declare half @llvm.fabs.f16(half) -declare <2 x half> @llvm.fabs.v2f16(<2 x half>) -declare <4 x half> @llvm.fabs.v4f16(<4 x half>) -declare <8 x half> @llvm.fabs.v8f16(<8 x half>) -declare <16 x half> @llvm.fabs.v16f16(<16 x half>) -declare <vscale x 2 x half> @llvm.fabs.nvx2f16(<vscale x 2 x half>) -declare <vscale x 4 x half> @llvm.fabs.nvx4f16(<vscale x 4 x half>) -declare <vscale x 8 x half> @llvm.fabs.nvx8f16(<vscale x 8 x half>) -declare <vscale x 16 x half> @llvm.fabs.nvx16f16(<vscale x 16 x half>) -declare float @llvm.fabs.f32(float) -declare <2 x float> @llvm.fabs.v2f32(<2 x float>) -declare <4 x float> @llvm.fabs.v4f32(<4 x float>) -declare <8 x float> @llvm.fabs.v8f32(<8 x float>) -declare <16 x float> @llvm.fabs.v16f32(<16 x float>) -declare <vscale x 1 x float> @llvm.fabs.nvx1f32(<vscale x 1 x float>) -declare <vscale x 2 x float> @llvm.fabs.nvx2f32(<vscale x 2 x float>) -declare <vscale x 4 x float> @llvm.fabs.nvx4f32(<vscale x 4 x float>) -declare <vscale x 8 x float> @llvm.fabs.nvx8f32(<vscale x 8 x float>) -declare <vscale x 16 x float> @llvm.fabs.nvx16f32(<vscale x 16 x float>) -declare double @llvm.fabs.f64(double) -declare <2 x double> @llvm.fabs.v2f64(<2 x double>) -declare <4 x double> @llvm.fabs.v4f64(<4 
x double>) -declare <8 x double> @llvm.fabs.v8f64(<8 x double>) -declare <16 x double> @llvm.fabs.v16f64(<16 x double>) -declare <vscale x 1 x double> @llvm.fabs.nvx1f64(<vscale x 1 x double>) -declare <vscale x 2 x double> @llvm.fabs.nvx2f64(<vscale x 2 x double>) -declare <vscale x 4 x double> @llvm.fabs.nvx4f64(<vscale x 4 x double>) -declare <vscale x 8 x double> @llvm.fabs.nvx8f64(<vscale x 8 x double>) - -declare float @llvm.minnum.f32(float, float) -declare <2 x float> @llvm.minnum.v2f32(<2 x float>, <2 x float>) -declare <4 x float> @llvm.minnum.v4f32(<4 x float>, <4 x float>) -declare <8 x float> @llvm.minnum.v8f32(<8 x float>, <8 x float>) -declare <16 x float> @llvm.minnum.v16f32(<16 x float>, <16 x float>) -declare <vscale x 1 x float> @llvm.minnum.nvx1f32(<vscale x 1 x float>, <vscale x 1 x float>) -declare <vscale x 2 x float> @llvm.minnum.nvx2f32(<vscale x 2 x float>, <vscale x 2 x float>) -declare <vscale x 4 x float> @llvm.minnum.nvx4f32(<vscale x 4 x float>, <vscale x 4 x float>) -declare <vscale x 8 x float> @llvm.minnum.nvx8f32(<vscale x 8 x float>, <vscale x 8 x float>) -declare <vscale x 16 x float> @llvm.minnum.nvx16f32(<vscale x 16 x float>, <vscale x 16 x float>) -declare double @llvm.minnum.f64(double, double) -declare <2 x double> @llvm.minnum.v2f64(<2 x double>, <2 x double>) -declare <4 x double> @llvm.minnum.v4f64(<4 x double>, <4 x double>) -declare <8 x double> @llvm.minnum.v8f64(<8 x double>, <8 x double>) -declare <16 x double> @llvm.minnum.v16f64(<16 x double>, <16 x double>) -declare <vscale x 1 x double> @llvm.minnum.nvx1f64(<vscale x 1 x double>, <vscale x 1 x double>) -declare <vscale x 2 x double> @llvm.minnum.nvx2f64(<vscale x 2 x double>, <vscale x 2 x double>) -declare <vscale x 4 x double> @llvm.minnum.nvx4f64(<vscale x 4 x double>, <vscale x 4 x double>) -declare <vscale x 8 x double> @llvm.minnum.nvx8f64(<vscale x 8 x double>, <vscale x 8 x double>) - -declare float @llvm.maxnum.f32(float, float) -declare <2 x float> @llvm.maxnum.v2f32(<2 x float>, <2 x float>) -declare <4 x float> @llvm.maxnum.v4f32(<4 x float>, <4 x float>) -declare <8 x float> @llvm.maxnum.v8f32(<8 x float>, <8 x float>) -declare <16 x float> @llvm.maxnum.v16f32(<16 x float>, <16 x float>) -declare <vscale x 1 x float> @llvm.maxnum.nvx1f32(<vscale x 1 x float>, <vscale x 1 x float>) -declare <vscale x 2 x float> @llvm.maxnum.nvx2f32(<vscale x 2 x float>, <vscale x 2 x float>) -declare <vscale x 4 x float> @llvm.maxnum.nvx4f32(<vscale x 4 x float>, <vscale x 4 x float>) -declare <vscale x 8 x float> @llvm.maxnum.nvx8f32(<vscale x 8 x float>, <vscale x 8 x float>) -declare <vscale x 16 x float> @llvm.maxnum.nvx16f32(<vscale x 16 x float>, <vscale x 16 x float>) -declare double @llvm.maxnum.f64(double, double) -declare <2 x double> @llvm.maxnum.v2f64(<2 x double>, <2 x double>) -declare <4 x double> @llvm.maxnum.v4f64(<4 x double>, <4 x double>) -declare <8 x double> @llvm.maxnum.v8f64(<8 x double>, <8 x double>) -declare <16 x double> @llvm.maxnum.v16f64(<16 x double>, <16 x double>) -declare <vscale x 1 x double> @llvm.maxnum.nvx1f64(<vscale x 1 x double>, <vscale x 1 x double>) -declare <vscale x 2 x double> @llvm.maxnum.nvx2f64(<vscale x 2 x double>, <vscale x 2 x double>) -declare <vscale x 4 x double> @llvm.maxnum.nvx4f64(<vscale x 4 x double>, <vscale x 4 x double>) -declare <vscale x 8 x double> @llvm.maxnum.nvx8f64(<vscale x 8 x double>, <vscale x 8 x double>) - -declare float @llvm.minimum.f32(float, float) -declare <2 x float> @llvm.minimum.v2f32(<2 x float>, <2 x 
float>) -declare <4 x float> @llvm.minimum.v4f32(<4 x float>, <4 x float>) -declare <8 x float> @llvm.minimum.v8f32(<8 x float>, <8 x float>) -declare <16 x float> @llvm.minimum.v16f32(<16 x float>, <16 x float>) -declare <vscale x 1 x float> @llvm.minimum.nvx1f32(<vscale x 1 x float>, <vscale x 1 x float>) -declare <vscale x 2 x float> @llvm.minimum.nvx2f32(<vscale x 2 x float>, <vscale x 2 x float>) -declare <vscale x 4 x float> @llvm.minimum.nvx4f32(<vscale x 4 x float>, <vscale x 4 x float>) -declare <vscale x 8 x float> @llvm.minimum.nvx8f32(<vscale x 8 x float>, <vscale x 8 x float>) -declare <vscale x 16 x float> @llvm.minimum.nvx16f32(<vscale x 16 x float>, <vscale x 16 x float>) -declare double @llvm.minimum.f64(double, double) -declare <2 x double> @llvm.minimum.v2f64(<2 x double>, <2 x double>) -declare <4 x double> @llvm.minimum.v4f64(<4 x double>, <4 x double>) -declare <8 x double> @llvm.minimum.v8f64(<8 x double>, <8 x double>) -declare <16 x double> @llvm.minimum.v16f64(<16 x double>, <16 x double>) -declare <vscale x 1 x double> @llvm.minimum.nvx1f64(<vscale x 1 x double>, <vscale x 1 x double>) -declare <vscale x 2 x double> @llvm.minimum.nvx2f64(<vscale x 2 x double>, <vscale x 2 x double>) -declare <vscale x 4 x double> @llvm.minimum.nvx4f64(<vscale x 4 x double>, <vscale x 4 x double>) -declare <vscale x 8 x double> @llvm.minimum.nvx8f64(<vscale x 8 x double>, <vscale x 8 x double>) - -declare float @llvm.maximum.f32(float, float) -declare <2 x float> @llvm.maximum.v2f32(<2 x float>, <2 x float>) -declare <4 x float> @llvm.maximum.v4f32(<4 x float>, <4 x float>) -declare <8 x float> @llvm.maximum.v8f32(<8 x float>, <8 x float>) -declare <16 x float> @llvm.maximum.v16f32(<16 x float>, <16 x float>) -declare <vscale x 1 x float> @llvm.maximum.nvx1f32(<vscale x 1 x float>, <vscale x 1 x float>) -declare <vscale x 2 x float> @llvm.maximum.nvx2f32(<vscale x 2 x float>, <vscale x 2 x float>) -declare <vscale x 4 x float> @llvm.maximum.nvx4f32(<vscale x 4 x float>, <vscale x 4 x float>) -declare <vscale x 8 x float> @llvm.maximum.nvx8f32(<vscale x 8 x float>, <vscale x 8 x float>) -declare <vscale x 16 x float> @llvm.maximum.nvx16f32(<vscale x 16 x float>, <vscale x 16 x float>) -declare double @llvm.maximum.f64(double, double) -declare <2 x double> @llvm.maximum.v2f64(<2 x double>, <2 x double>) -declare <4 x double> @llvm.maximum.v4f64(<4 x double>, <4 x double>) -declare <8 x double> @llvm.maximum.v8f64(<8 x double>, <8 x double>) -declare <16 x double> @llvm.maximum.v16f64(<16 x double>, <16 x double>) -declare <vscale x 1 x double> @llvm.maximum.nvx1f64(<vscale x 1 x double>, <vscale x 1 x double>) -declare <vscale x 2 x double> @llvm.maximum.nvx2f64(<vscale x 2 x double>, <vscale x 2 x double>) -declare <vscale x 4 x double> @llvm.maximum.nvx4f64(<vscale x 4 x double>, <vscale x 4 x double>) -declare <vscale x 8 x double> @llvm.maximum.nvx8f64(<vscale x 8 x double>, <vscale x 8 x double>) - -declare float @llvm.copysign.f32(float, float) -declare <2 x float> @llvm.copysign.v2f32(<2 x float>, <2 x float>) -declare <4 x float> @llvm.copysign.v4f32(<4 x float>, <4 x float>) -declare <8 x float> @llvm.copysign.v8f32(<8 x float>, <8 x float>) -declare <16 x float> @llvm.copysign.v16f32(<16 x float>, <16 x float>) -declare <vscale x 1 x float> @llvm.copysign.nvx1f32(<vscale x 1 x float>, <vscale x 1 x float>) -declare <vscale x 2 x float> @llvm.copysign.nvx2f32(<vscale x 2 x float>, <vscale x 2 x float>) -declare <vscale x 4 x float> @llvm.copysign.nvx4f32(<vscale x 4 x 
float>, <vscale x 4 x float>) -declare <vscale x 8 x float> @llvm.copysign.nvx8f32(<vscale x 8 x float>, <vscale x 8 x float>) -declare <vscale x 16 x float> @llvm.copysign.nvx16f32(<vscale x 16 x float>, <vscale x 16 x float>) -declare double @llvm.copysign.f64(double, double) -declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>) -declare <4 x double> @llvm.copysign.v4f64(<4 x double>, <4 x double>) -declare <8 x double> @llvm.copysign.v8f64(<8 x double>, <8 x double>) -declare <16 x double> @llvm.copysign.v16f64(<16 x double>, <16 x double>) -declare <vscale x 1 x double> @llvm.copysign.nvx1f64(<vscale x 1 x double>, <vscale x 1 x double>) -declare <vscale x 2 x double> @llvm.copysign.nvx2f64(<vscale x 2 x double>, <vscale x 2 x double>) -declare <vscale x 4 x double> @llvm.copysign.nvx4f64(<vscale x 4 x double>, <vscale x 4 x double>) -declare <vscale x 8 x double> @llvm.copysign.nvx8f64(<vscale x 8 x double>, <vscale x 8 x double>) +define void @copysign_f16() { +; ZVFH-LABEL: 'copysign_f16' +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.copysign.f16(half undef, half undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = call <2 x half> @llvm.copysign.v2f16(<2 x half> undef, <2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x half> @llvm.copysign.v4f16(<4 x half> undef, <4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <8 x half> @llvm.copysign.v8f16(<8 x half> undef, <8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x half> @llvm.copysign.v16f16(<16 x half> undef, <16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; ZVFHMIN-LABEL: 'copysign_f16' +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.copysign.f16(half undef, half undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %2 = call <2 x half> @llvm.copysign.v2f16(<2 x half> undef, <2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 11 for instruction: %3 = call <4 x half> @llvm.copysign.v4f16(<4 x half> undef, <4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %4 = call <8 x half> @llvm.copysign.v8f16(<8 x half> undef, <8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %5 = call <16 x half> @llvm.copysign.v16f16(<16 x half> undef, <16 x half> undef) +; ZVFHMIN-NEXT: Cost 
Model: Invalid cost for instruction: %6 = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %7 = call <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %8 = call <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %9 = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %10 = call <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call half @llvm.copysign.f16(half undef, half undef) + call <2 x half> @llvm.copysign.v2f16(<2 x half> undef, <2 x half> undef) + call <4 x half> @llvm.copysign.v4f16(<4 x half> undef, <4 x half> undef) + call <8 x half> @llvm.copysign.v8f16(<8 x half> undef, <8 x half> undef) + call <16 x half> @llvm.copysign.v16f16(<16 x half> undef, <16 x half> undef) + call <vscale x 1 x half> @llvm.copysign.nvx1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef) + call <vscale x 2 x half> @llvm.copysign.nvx2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef) + call <vscale x 4 x half> @llvm.copysign.nvx4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef) + call <vscale x 8 x half> @llvm.copysign.nvx8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef) + call <vscale x 16 x half> @llvm.copysign.nvx16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef) + ret void +} diff --git a/llvm/test/Analysis/CostModel/RISCV/fround.ll b/llvm/test/Analysis/CostModel/RISCV/fround.ll index 71dd64d..dc501b82 100644 --- a/llvm/test/Analysis/CostModel/RISCV/fround.ll +++ b/llvm/test/Analysis/CostModel/RISCV/fround.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py -; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v,+f,+d -riscv-v-vector-bits-min=-1 | FileCheck %s +; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v,+f,+d,+zvfh | FileCheck %s --check-prefixes=CHECK,ZVFH +; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v,+f,+d,+zvfhmin | FileCheck %s --check-prefixes=CHECK,ZVFHMIN define void @floor() { ; CHECK-LABEL: 'floor' @@ -46,6 +47,46 @@ define void @floor() { ret void } +define void @floor_fp16() { +; ZVFH-LABEL: 'floor_fp16' +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.floor.f16(half undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = call <2 x half> @llvm.floor.v2f16(<2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x half> @llvm.floor.v4f16(<4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <8 x half> @llvm.floor.v8f16(<8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x half> @llvm.floor.v16f16(<16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call <vscale x 1 x half> @llvm.floor.nxv1f16(<vscale x 1 x half> undef) +; ZVFH-NEXT: Cost Model: 
Found an estimated cost of 2 for instruction: %7 = call <vscale x 2 x half> @llvm.floor.nxv2f16(<vscale x 2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <vscale x 4 x half> @llvm.floor.nxv4f16(<vscale x 4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 8 x half> @llvm.floor.nxv8f16(<vscale x 8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x half> @llvm.floor.nxv16f16(<vscale x 16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; ZVFHMIN-LABEL: 'floor_fp16' +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %1 = call half @llvm.floor.f16(half undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x half> @llvm.floor.v2f16(<2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x half> @llvm.floor.v4f16(<4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x half> @llvm.floor.v8f16(<8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x half> @llvm.floor.v16f16(<16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x half> @llvm.floor.nxv1f16(<vscale x 1 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x half> @llvm.floor.nxv2f16(<vscale x 2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x half> @llvm.floor.nxv4f16(<vscale x 4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x half> @llvm.floor.nxv8f16(<vscale x 8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 16 x half> @llvm.floor.nxv16f16(<vscale x 16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call half @llvm.floor.f16(half undef) + call <2 x half> @llvm.floor.v2f16(<2 x half> undef) + call <4 x half> @llvm.floor.v4f16(<4 x half> undef) + call <8 x half> @llvm.floor.v8f16(<8 x half> undef) + call <16 x half> @llvm.floor.v16f16(<16 x half> undef) + call <vscale x 1 x half> @llvm.floor.nvx1f16(<vscale x 1 x half> undef) + call <vscale x 2 x half> @llvm.floor.nvx2f16(<vscale x 2 x half> undef) + call <vscale x 4 x half> @llvm.floor.nvx4f16(<vscale x 4 x half> undef) + call <vscale x 8 x half> @llvm.floor.nvx8f16(<vscale x 8 x half> undef) + call <vscale x 16 x half> @llvm.floor.nvx16f16(<vscale x 16 x half> undef) + ret void +} + define void @ceil() { ; CHECK-LABEL: 'ceil' ; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %1 = call float @llvm.ceil.f32(float undef) @@ -91,6 +132,46 @@ define void @ceil() { ret void } +define void @ceil_fp16() { +; ZVFH-LABEL: 'ceil_fp16' +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.ceil.f16(half undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = call <2 x half> @llvm.ceil.v2f16(<2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x half> @llvm.ceil.v4f16(<4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = 
call <8 x half> @llvm.ceil.v8f16(<8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x half> @llvm.ceil.v16f16(<16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call <vscale x 1 x half> @llvm.ceil.nxv1f16(<vscale x 1 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call <vscale x 2 x half> @llvm.ceil.nxv2f16(<vscale x 2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <vscale x 4 x half> @llvm.ceil.nxv4f16(<vscale x 4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 8 x half> @llvm.ceil.nxv8f16(<vscale x 8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x half> @llvm.ceil.nxv16f16(<vscale x 16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; ZVFHMIN-LABEL: 'ceil_fp16' +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %1 = call half @llvm.ceil.f16(half undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x half> @llvm.ceil.v2f16(<2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x half> @llvm.ceil.v4f16(<4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x half> @llvm.ceil.v8f16(<8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x half> @llvm.ceil.v16f16(<16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x half> @llvm.ceil.nxv1f16(<vscale x 1 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x half> @llvm.ceil.nxv2f16(<vscale x 2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x half> @llvm.ceil.nxv4f16(<vscale x 4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x half> @llvm.ceil.nxv8f16(<vscale x 8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 16 x half> @llvm.ceil.nxv16f16(<vscale x 16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call half @llvm.ceil.f16(half undef) + call <2 x half> @llvm.ceil.v2f16(<2 x half> undef) + call <4 x half> @llvm.ceil.v4f16(<4 x half> undef) + call <8 x half> @llvm.ceil.v8f16(<8 x half> undef) + call <16 x half> @llvm.ceil.v16f16(<16 x half> undef) + call <vscale x 1 x half> @llvm.ceil.nvx1f16(<vscale x 1 x half> undef) + call <vscale x 2 x half> @llvm.ceil.nvx2f16(<vscale x 2 x half> undef) + call <vscale x 4 x half> @llvm.ceil.nvx4f16(<vscale x 4 x half> undef) + call <vscale x 8 x half> @llvm.ceil.nvx8f16(<vscale x 8 x half> undef) + call <vscale x 16 x half> @llvm.ceil.nvx16f16(<vscale x 16 x half> undef) + ret void +} + define void @trunc() { ; CHECK-LABEL: 'trunc' ; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %1 = call float @llvm.trunc.f32(float undef) @@ -136,6 +217,46 @@ define void @trunc() { ret void } +define void @trunc_fp16() { +; ZVFH-LABEL: 'trunc_fp16' +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.trunc.f16(half 
undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = call <2 x half> @llvm.trunc.v2f16(<2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x half> @llvm.trunc.v4f16(<4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <8 x half> @llvm.trunc.v8f16(<8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x half> @llvm.trunc.v16f16(<16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call <vscale x 1 x half> @llvm.trunc.nxv1f16(<vscale x 1 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call <vscale x 2 x half> @llvm.trunc.nxv2f16(<vscale x 2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 8 x half> @llvm.trunc.nxv8f16(<vscale x 8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x half> @llvm.trunc.nxv16f16(<vscale x 16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; ZVFHMIN-LABEL: 'trunc_fp16' +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %1 = call half @llvm.trunc.f16(half undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %2 = call <2 x half> @llvm.trunc.v2f16(<2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 39 for instruction: %3 = call <4 x half> @llvm.trunc.v4f16(<4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 79 for instruction: %4 = call <8 x half> @llvm.trunc.v8f16(<8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 159 for instruction: %5 = call <16 x half> @llvm.trunc.v16f16(<16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x half> @llvm.trunc.nxv1f16(<vscale x 1 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x half> @llvm.trunc.nxv2f16(<vscale x 2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x half> @llvm.trunc.nxv8f16(<vscale x 8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 16 x half> @llvm.trunc.nxv16f16(<vscale x 16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call half @llvm.trunc.f16(half undef) + call <2 x half> @llvm.trunc.v2f16(<2 x half> undef) + call <4 x half> @llvm.trunc.v4f16(<4 x half> undef) + call <8 x half> @llvm.trunc.v8f16(<8 x half> undef) + call <16 x half> @llvm.trunc.v16f16(<16 x half> undef) + call <vscale x 1 x half> @llvm.trunc.nvx1f16(<vscale x 1 x half> undef) + call <vscale x 2 x half> @llvm.trunc.nvx2f16(<vscale x 2 x half> undef) + call <vscale x 4 x half> @llvm.trunc.nvx4f16(<vscale x 4 x half> undef) + call <vscale x 8 x half> @llvm.trunc.nvx8f16(<vscale x 8 x half> undef) + call <vscale x 16 x half> @llvm.trunc.nvx16f16(<vscale x 16 x half> undef) + ret void +} + define void 
@rint() { ; CHECK-LABEL: 'rint' ; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %1 = call float @llvm.rint.f32(float undef) @@ -181,6 +302,46 @@ define void @rint() { ret void } +define void @rint_fp16() { +; ZVFH-LABEL: 'rint_fp16' +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.rint.f16(half undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = call <2 x half> @llvm.rint.v2f16(<2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x half> @llvm.rint.v4f16(<4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <8 x half> @llvm.rint.v8f16(<8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x half> @llvm.rint.v16f16(<16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call <vscale x 1 x half> @llvm.rint.nxv1f16(<vscale x 1 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call <vscale x 2 x half> @llvm.rint.nxv2f16(<vscale x 2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <vscale x 4 x half> @llvm.rint.nxv4f16(<vscale x 4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 8 x half> @llvm.rint.nxv8f16(<vscale x 8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x half> @llvm.rint.nxv16f16(<vscale x 16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; ZVFHMIN-LABEL: 'rint_fp16' +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %1 = call half @llvm.rint.f16(half undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x half> @llvm.rint.v2f16(<2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x half> @llvm.rint.v4f16(<4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x half> @llvm.rint.v8f16(<8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x half> @llvm.rint.v16f16(<16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x half> @llvm.rint.nxv1f16(<vscale x 1 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x half> @llvm.rint.nxv2f16(<vscale x 2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x half> @llvm.rint.nxv4f16(<vscale x 4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x half> @llvm.rint.nxv8f16(<vscale x 8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 16 x half> @llvm.rint.nxv16f16(<vscale x 16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call half @llvm.rint.f16(half undef) + call <2 x half> @llvm.rint.v2f16(<2 x half> undef) + call <4 x half> @llvm.rint.v4f16(<4 x half> undef) + call <8 x half> @llvm.rint.v8f16(<8 x half> undef) + call <16 x half> @llvm.rint.v16f16(<16 x half> undef) + call <vscale x 1 x half> @llvm.rint.nxv1f16(<vscale x 1 x half>
undef) + call <vscale x 2 x half> @llvm.rint.nxv2f16(<vscale x 2 x half> undef) + call <vscale x 4 x half> @llvm.rint.nxv4f16(<vscale x 4 x half> undef) + call <vscale x 8 x half> @llvm.rint.nxv8f16(<vscale x 8 x half> undef) + call <vscale x 16 x half> @llvm.rint.nxv16f16(<vscale x 16 x half> undef) + ret void +} + define void @lrint() { ; CHECK-LABEL: 'lrint' ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i64 @llvm.lrint.i64.f32(float undef) @@ -226,6 +387,33 @@ define void @lrint() { ret void } +define void @lrint_fp16() { +; CHECK-LABEL: 'lrint_fp16' +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i64 @llvm.lrint.i64.f16(half undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i64> @llvm.lrint.v2i64.v2f16(<2 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i64> @llvm.lrint.v4i64.v4f16(<4 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x i64> @llvm.lrint.v8i64.v8f16(<8 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x i64> @llvm.lrint.v16i64.v16f16(<16 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f16(<vscale x 1 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f16(<vscale x 2 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f16(<vscale x 4 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f16(<vscale x 8 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f16(<vscale x 16 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call i64 @llvm.lrint.i64.f16(half undef) + call <2 x i64> @llvm.lrint.v2i64.v2f16(<2 x half> undef) + call <4 x i64> @llvm.lrint.v4i64.v4f16(<4 x half> undef) + call <8 x i64> @llvm.lrint.v8i64.v8f16(<8 x half> undef) + call <16 x i64> @llvm.lrint.v16i64.v16f16(<16 x half> undef) + call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f16(<vscale x 1 x half> undef) + call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f16(<vscale x 2 x half> undef) + call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f16(<vscale x 4 x half> undef) + call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f16(<vscale x 8 x half> undef) + call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f16(<vscale x 16 x half> undef) + ret void +} + define void @llrint() { ; CHECK-LABEL: 'llrint' ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i64 @llvm.llrint.i64.f32(float undef) @@ -271,6 +459,33 @@ define void @llrint() { ret void } +define void @llrint_fp16() { +; CHECK-LABEL: 'llrint_fp16' +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i64 @llvm.llrint.i64.f16(half undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i64> @llvm.llrint.v4i64.v4f16(<4 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x i64>
@llvm.llrint.v8i64.v8f16(<8 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f16(<vscale x 1 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f16(<vscale x 2 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f16(<vscale x 4 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f16(<vscale x 8 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f16(<vscale x 16 x half> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call i64 @llvm.llrint.i64.f16(half undef) + call <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half> undef) + call <4 x i64> @llvm.llrint.v4i64.v4f16(<4 x half> undef) + call <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half> undef) + call <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half> undef) + call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f16(<vscale x 1 x half> undef) + call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f16(<vscale x 2 x half> undef) + call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f16(<vscale x 4 x half> undef) + call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f16(<vscale x 8 x half> undef) + call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f16(<vscale x 16 x half> undef) + ret void +} + define void @nearbyint() { ; CHECK-LABEL: 'nearbyint' ; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %1 = call float @llvm.nearbyint.f32(float undef) @@ -316,6 +531,46 @@ define void @nearbyint() { ret void } +define void @nearbyint_fp16() { +; ZVFH-LABEL: 'nearbyint_fp16' +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.nearbyint.f16(half undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = call <2 x half> @llvm.nearbyint.v2f16(<2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x half> @llvm.nearbyint.v4f16(<4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <8 x half> @llvm.nearbyint.v8f16(<8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x half> @llvm.nearbyint.v16f16(<16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call <vscale x 1 x half> @llvm.nearbyint.nxv1f16(<vscale x 1 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call <vscale x 2 x half> @llvm.nearbyint.nxv2f16(<vscale x 2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <vscale x 4 x half> @llvm.nearbyint.nxv4f16(<vscale x 4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 8 x half> @llvm.nearbyint.nxv8f16(<vscale x 8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x half> @llvm.nearbyint.nxv16f16(<vscale x 16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; ZVFHMIN-LABEL: 'nearbyint_fp16'
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %1 = call half @llvm.nearbyint.f16(half undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x half> @llvm.nearbyint.v2f16(<2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x half> @llvm.nearbyint.v4f16(<4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x half> @llvm.nearbyint.v8f16(<8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x half> @llvm.nearbyint.v16f16(<16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x half> @llvm.nearbyint.nxv1f16(<vscale x 1 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x half> @llvm.nearbyint.nxv2f16(<vscale x 2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x half> @llvm.nearbyint.nxv4f16(<vscale x 4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x half> @llvm.nearbyint.nxv8f16(<vscale x 8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 16 x half> @llvm.nearbyint.nxv16f16(<vscale x 16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call half @llvm.nearbyint.f16(half undef) + call <2 x half> @llvm.nearbyint.v2f16(<2 x half> undef) + call <4 x half> @llvm.nearbyint.v4f16(<4 x half> undef) + call <8 x half> @llvm.nearbyint.v8f16(<8 x half> undef) + call <16 x half> @llvm.nearbyint.v16f16(<16 x half> undef) + call <vscale x 1 x half> @llvm.nearbyint.nxv1f16(<vscale x 1 x half> undef) + call <vscale x 2 x half> @llvm.nearbyint.nxv2f16(<vscale x 2 x half> undef) + call <vscale x 4 x half> @llvm.nearbyint.nxv4f16(<vscale x 4 x half> undef) + call <vscale x 8 x half> @llvm.nearbyint.nxv8f16(<vscale x 8 x half> undef) + call <vscale x 16 x half> @llvm.nearbyint.nxv16f16(<vscale x 16 x half> undef) + ret void +} + define void @round() { ; CHECK-LABEL: 'round' ; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %1 = call float @llvm.round.f32(float undef) @@ -361,6 +616,46 @@ define void @round() { ret void } +define void @round_fp16() { +; ZVFH-LABEL: 'round_fp16' +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.round.f16(half undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = call <2 x half> @llvm.round.v2f16(<2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x half> @llvm.round.v4f16(<4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <8 x half> @llvm.round.v8f16(<8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x half> @llvm.round.v16f16(<16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call <vscale x 1 x half> @llvm.round.nxv1f16(<vscale x 1 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call <vscale x 2 x half> @llvm.round.nxv2f16(<vscale x 2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <vscale x 4 x half>
@llvm.round.nxv4f16(<vscale x 4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 8 x half> @llvm.round.nxv8f16(<vscale x 8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x half> @llvm.round.nxv16f16(<vscale x 16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; ZVFHMIN-LABEL: 'round_fp16' +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %1 = call half @llvm.round.f16(half undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x half> @llvm.round.v2f16(<2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x half> @llvm.round.v4f16(<4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x half> @llvm.round.v8f16(<8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x half> @llvm.round.v16f16(<16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x half> @llvm.round.nxv1f16(<vscale x 1 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x half> @llvm.round.nxv2f16(<vscale x 2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x half> @llvm.round.nxv4f16(<vscale x 4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x half> @llvm.round.nxv8f16(<vscale x 8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 16 x half> @llvm.round.nxv16f16(<vscale x 16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call half @llvm.round.f16(half undef) + call <2 x half> @llvm.round.v2f16(<2 x half> undef) + call <4 x half> @llvm.round.v4f16(<4 x half> undef) + call <8 x half> @llvm.round.v8f16(<8 x half> undef) + call <16 x half> @llvm.round.v16f16(<16 x half> undef) + call <vscale x 1 x half> @llvm.round.nxv1f16(<vscale x 1 x half> undef) + call <vscale x 2 x half> @llvm.round.nxv2f16(<vscale x 2 x half> undef) + call <vscale x 4 x half> @llvm.round.nxv4f16(<vscale x 4 x half> undef) + call <vscale x 8 x half> @llvm.round.nxv8f16(<vscale x 8 x half> undef) + call <vscale x 16 x half> @llvm.round.nxv16f16(<vscale x 16 x half> undef) + ret void +} + define void @roundeven() { ; CHECK-LABEL: 'roundeven' ; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %1 = call float @llvm.roundeven.f32(float undef) @@ -406,6 +701,46 @@ define void @roundeven() { ret void } +define void @roundeven_fp16() { +; ZVFH-LABEL: 'roundeven_fp16' +; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.roundeven.f16(half undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = call <2 x half> @llvm.roundeven.v2f16(<2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x half> @llvm.roundeven.v4f16(<4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <8 x half> @llvm.roundeven.v8f16(<8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x half> @llvm.roundeven.v16f16(<16 x
half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call <vscale x 1 x half> @llvm.roundeven.nxv1f16(<vscale x 1 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call <vscale x 2 x half> @llvm.roundeven.nxv2f16(<vscale x 2 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <vscale x 4 x half> @llvm.roundeven.nxv4f16(<vscale x 4 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 8 x half> @llvm.roundeven.nxv8f16(<vscale x 8 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x half> @llvm.roundeven.nxv16f16(<vscale x 16 x half> undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; ZVFHMIN-LABEL: 'roundeven_fp16' +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %1 = call half @llvm.roundeven.f16(half undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x half> @llvm.roundeven.v2f16(<2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x half> @llvm.roundeven.v4f16(<4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x half> @llvm.roundeven.v8f16(<8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x half> @llvm.roundeven.v16f16(<16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x half> @llvm.roundeven.nxv1f16(<vscale x 1 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x half> @llvm.roundeven.nxv2f16(<vscale x 2 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x half> @llvm.roundeven.nxv4f16(<vscale x 4 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x half> @llvm.roundeven.nxv8f16(<vscale x 8 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 16 x half> @llvm.roundeven.nxv16f16(<vscale x 16 x half> undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call half @llvm.roundeven.f16(half undef) + call <2 x half> @llvm.roundeven.v2f16(<2 x half> undef) + call <4 x half> @llvm.roundeven.v4f16(<4 x half> undef) + call <8 x half> @llvm.roundeven.v8f16(<8 x half> undef) + call <16 x half> @llvm.roundeven.v16f16(<16 x half> undef) + call <vscale x 1 x half> @llvm.roundeven.nxv1f16(<vscale x 1 x half> undef) + call <vscale x 2 x half> @llvm.roundeven.nxv2f16(<vscale x 2 x half> undef) + call <vscale x 4 x half> @llvm.roundeven.nxv4f16(<vscale x 4 x half> undef) + call <vscale x 8 x half> @llvm.roundeven.nxv8f16(<vscale x 8 x half> undef) + call <vscale x 16 x half> @llvm.roundeven.nxv16f16(<vscale x 16 x half> undef) + ret void +} + define void @vp_ceil() { ; CHECK-LABEL: 'vp_ceil' ; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %1 = call <2 x float> @llvm.vp.ceil.v2f32(<2 x float> undef, <2 x i1> undef, i32 undef) @@ -447,6 +782,43 @@ define void @vp_ceil() { ret void } +define void @vp_ceil_f16() { +; ZVFH-LABEL: 'vp_ceil_f16' +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %1 = call <2 x half>
@llvm.vp.ceil.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %2 = call <4 x half> @llvm.vp.ceil.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %3 = call <8 x half> @llvm.vp.ceil.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %4 = call <16 x half> @llvm.vp.ceil.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %5 = call <vscale x 1 x half> @llvm.vp.ceil.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %6 = call <vscale x 2 x half> @llvm.vp.ceil.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %7 = call <vscale x 4 x half> @llvm.vp.ceil.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %8 = call <vscale x 8 x half> @llvm.vp.ceil.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %9 = call <vscale x 16 x half> @llvm.vp.ceil.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; ZVFHMIN-LABEL: 'vp_ceil_f16' +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call <2 x half> @llvm.vp.ceil.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <4 x half> @llvm.vp.ceil.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <8 x half> @llvm.vp.ceil.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <16 x half> @llvm.vp.ceil.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <vscale x 1 x half> @llvm.vp.ceil.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x half> @llvm.vp.ceil.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x half> @llvm.vp.ceil.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x half> @llvm.vp.ceil.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 16 x half> @llvm.vp.ceil.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call <2 x half> @llvm.vp.ceil.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef) + call <4 x half> @llvm.vp.ceil.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef) + call <8 x half> @llvm.vp.ceil.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef) + call <16 x half> 
@llvm.vp.ceil.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef) + call <vscale x 1 x half> @llvm.vp.ceil.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef) + call <vscale x 2 x half> @llvm.vp.ceil.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef) + call <vscale x 4 x half> @llvm.vp.ceil.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef) + call <vscale x 8 x half> @llvm.vp.ceil.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef) + call <vscale x 16 x half> @llvm.vp.ceil.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef) + ret void +} + define void @vp_floor() { ; CHECK-LABEL: 'vp_floor' ; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %1 = call <2 x float> @llvm.vp.floor.v2f32(<2 x float> undef, <2 x i1> undef, i32 undef) @@ -488,6 +860,43 @@ define void @vp_floor() { ret void } +define void @vp_floor_f16() { +; ZVFH-LABEL: 'vp_floor_f16' +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %1 = call <2 x half> @llvm.vp.floor.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %2 = call <4 x half> @llvm.vp.floor.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %3 = call <8 x half> @llvm.vp.floor.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %4 = call <16 x half> @llvm.vp.floor.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %5 = call <vscale x 1 x half> @llvm.vp.floor.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %6 = call <vscale x 2 x half> @llvm.vp.floor.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %7 = call <vscale x 4 x half> @llvm.vp.floor.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %8 = call <vscale x 8 x half> @llvm.vp.floor.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %9 = call <vscale x 16 x half> @llvm.vp.floor.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; ZVFHMIN-LABEL: 'vp_floor_f16' +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call <2 x half> @llvm.vp.floor.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <4 x half> @llvm.vp.floor.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <8 x half> @llvm.vp.floor.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <16 x half> @llvm.vp.floor.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <vscale x 1 x half> @llvm.vp.floor.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model:
Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x half> @llvm.vp.floor.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x half> @llvm.vp.floor.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x half> @llvm.vp.floor.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 16 x half> @llvm.vp.floor.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call <2 x half> @llvm.vp.floor.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef) + call <4 x half> @llvm.vp.floor.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef) + call <8 x half> @llvm.vp.floor.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef) + call <16 x half> @llvm.vp.floor.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef) + call <vscale x 1 x half> @llvm.vp.floor.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef) + call <vscale x 2 x half> @llvm.vp.floor.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef) + call <vscale x 4 x half> @llvm.vp.floor.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef) + call <vscale x 8 x half> @llvm.vp.floor.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef) + call <vscale x 16 x half> @llvm.vp.floor.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef) + ret void +} + define void @vp_round() { ; CHECK-LABEL: 'vp_round' ; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %1 = call <2 x float> @llvm.vp.round.v2f32(<2 x float> undef, <2 x i1> undef, i32 undef) @@ -529,6 +938,43 @@ define void @vp_round() { ret void } +define void @vp_round_f16() { +; ZVFH-LABEL: 'vp_round_f16' +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %1 = call <2 x half> @llvm.vp.round.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %2 = call <4 x half> @llvm.vp.round.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %3 = call <8 x half> @llvm.vp.round.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %4 = call <16 x half> @llvm.vp.round.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %5 = call <vscale x 1 x half> @llvm.vp.round.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %6 = call <vscale x 2 x half> @llvm.vp.round.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %7 = call <vscale x 4 x half> @llvm.vp.round.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %8 = call <vscale x 8 x half> @llvm.vp.round.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %9 =
call <vscale x 16 x half> @llvm.vp.round.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; ZVFHMIN-LABEL: 'vp_round_f16' +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call <2 x half> @llvm.vp.round.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <4 x half> @llvm.vp.round.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <8 x half> @llvm.vp.round.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <16 x half> @llvm.vp.round.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <vscale x 1 x half> @llvm.vp.round.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x half> @llvm.vp.round.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x half> @llvm.vp.round.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x half> @llvm.vp.round.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 16 x half> @llvm.vp.round.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call <2 x half> @llvm.vp.round.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef) + call <4 x half> @llvm.vp.round.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef) + call <8 x half> @llvm.vp.round.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef) + call <16 x half> @llvm.vp.round.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef) + call <vscale x 1 x half> @llvm.vp.round.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef) + call <vscale x 2 x half> @llvm.vp.round.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef) + call <vscale x 4 x half> @llvm.vp.round.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef) + call <vscale x 8 x half> @llvm.vp.round.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef) + call <vscale x 16 x half> @llvm.vp.round.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef) + ret void +} + define void @vp_roundeven() { ; CHECK-LABEL: 'vp_roundeven' ; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %1 = call <2 x float> @llvm.vp.roundeven.v2f32(<2 x float> undef, <2 x i1> undef, i32 undef) @@ -570,6 +1016,43 @@ define void @vp_roundeven() { ret void } +define void @vp_roundeven_f16() { +; ZVFH-LABEL: 'vp_roundeven_f16' +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %1 = call <2 x half> @llvm.vp.roundeven.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %2 = call <4 x half> @llvm.vp.roundeven.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef) +; ZVFH-NEXT:
Cost Model: Found an estimated cost of 7 for instruction: %3 = call <8 x half> @llvm.vp.roundeven.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %4 = call <16 x half> @llvm.vp.roundeven.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %5 = call <vscale x 1 x half> @llvm.vp.roundeven.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %6 = call <vscale x 2 x half> @llvm.vp.roundeven.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %7 = call <vscale x 4 x half> @llvm.vp.roundeven.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %8 = call <vscale x 8 x half> @llvm.vp.roundeven.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %9 = call <vscale x 16 x half> @llvm.vp.roundeven.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; ZVFHMIN-LABEL: 'vp_roundeven_f16' +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call <2 x half> @llvm.vp.roundeven.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <4 x half> @llvm.vp.roundeven.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <8 x half> @llvm.vp.roundeven.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <16 x half> @llvm.vp.roundeven.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <vscale x 1 x half> @llvm.vp.roundeven.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x half> @llvm.vp.roundeven.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x half> @llvm.vp.roundeven.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x half> @llvm.vp.roundeven.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 16 x half> @llvm.vp.roundeven.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call <2 x half> @llvm.vp.roundeven.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef) + call <4 x half> @llvm.vp.roundeven.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef) + call <8 x half> @llvm.vp.roundeven.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef) + call <16 x half> @llvm.vp.roundeven.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef) + call <vscale x 1 x half> @llvm.vp.roundeven.nxv1f16(<vscale x
1 x half> undef, <vscale x 1 x i1> undef, i32 undef) + call <vscale x 2 x half> @llvm.vp.roundeven.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef) + call <vscale x 4 x half> @llvm.vp.roundeven.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef) + call <vscale x 8 x half> @llvm.vp.roundeven.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef) + call <vscale x 16 x half> @llvm.vp.roundeven.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef) + ret void +} + define void @vp_roundtozero() { ; CHECK-LABEL: 'vp_roundtozero' ; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %1 = call <2 x float> @llvm.vp.roundtozero.v2f32(<2 x float> undef, <2 x i1> undef, i32 undef) @@ -611,6 +1094,43 @@ define void @vp_roundtozero() { ret void } +define void @vp_roundtozero_f16() { +; ZVFH-LABEL: 'vp_roundtozero_f16' +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %1 = call <2 x half> @llvm.vp.roundtozero.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %2 = call <4 x half> @llvm.vp.roundtozero.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %3 = call <8 x half> @llvm.vp.roundtozero.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %4 = call <16 x half> @llvm.vp.roundtozero.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %5 = call <vscale x 1 x half> @llvm.vp.roundtozero.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %6 = call <vscale x 2 x half> @llvm.vp.roundtozero.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %7 = call <vscale x 4 x half> @llvm.vp.roundtozero.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %8 = call <vscale x 8 x half> @llvm.vp.roundtozero.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %9 = call <vscale x 16 x half> @llvm.vp.roundtozero.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; ZVFHMIN-LABEL: 'vp_roundtozero_f16' +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %1 = call <2 x half> @llvm.vp.roundtozero.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 46 for instruction: %2 = call <4 x half> @llvm.vp.roundtozero.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 94 for instruction: %3 = call <8 x half> @llvm.vp.roundtozero.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 190 for instruction: %4 = call <16 x half> @llvm.vp.roundtozero.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <vscale x 1 x half> @llvm.vp.roundtozero.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef) +;
ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x half> @llvm.vp.roundtozero.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x half> @llvm.vp.roundtozero.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x half> @llvm.vp.roundtozero.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 16 x half> @llvm.vp.roundtozero.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call <2 x half> @llvm.vp.roundtozero.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef) + call <4 x half> @llvm.vp.roundtozero.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef) + call <8 x half> @llvm.vp.roundtozero.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef) + call <16 x half> @llvm.vp.roundtozero.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef) + call <vscale x 1 x half> @llvm.vp.roundtozero.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef) + call <vscale x 2 x half> @llvm.vp.roundtozero.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef) + call <vscale x 4 x half> @llvm.vp.roundtozero.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef) + call <vscale x 8 x half> @llvm.vp.roundtozero.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef) + call <vscale x 16 x half> @llvm.vp.roundtozero.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef) + ret void +} + define void @vp_rint() { ; CHECK-LABEL: 'vp_rint' ; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %1 = call <2 x float> @llvm.vp.rint.v2f32(<2 x float> undef, <2 x i1> undef, i32 undef) @@ -652,6 +1172,43 @@ define void @vp_rint() { ret void } +define void @vp_rint_f16() { +; ZVFH-LABEL: 'vp_rint_f16' +; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %1 = call <2 x half> @llvm.vp.rint.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %2 = call <4 x half> @llvm.vp.rint.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %3 = call <8 x half> @llvm.vp.rint.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %4 = call <16 x half> @llvm.vp.rint.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %5 = call <vscale x 1 x half> @llvm.vp.rint.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %6 = call <vscale x 2 x half> @llvm.vp.rint.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %7 = call <vscale x 4 x half> @llvm.vp.rint.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %8 = call <vscale x 8 x half> @llvm.vp.rint.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> undef,
i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %9 = call <vscale x 16 x half> @llvm.vp.rint.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef) +; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; +; ZVFHMIN-LABEL: 'vp_rint_f16' +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call <2 x half> @llvm.vp.rint.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <4 x half> @llvm.vp.rint.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <8 x half> @llvm.vp.rint.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <16 x half> @llvm.vp.rint.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <vscale x 1 x half> @llvm.vp.rint.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x half> @llvm.vp.rint.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x half> @llvm.vp.rint.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x half> @llvm.vp.rint.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 16 x half> @llvm.vp.rint.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef) +; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call <2 x half> @llvm.vp.rint.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef) + call <4 x half> @llvm.vp.rint.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef) + call <8 x half> @llvm.vp.rint.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef) + call <16 x half> @llvm.vp.rint.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef) + call <vscale x 1 x half> @llvm.vp.rint.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef) + call <vscale x 2 x half> @llvm.vp.rint.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef) + call <vscale x 4 x half> @llvm.vp.rint.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef) + call <vscale x 8 x half> @llvm.vp.rint.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef) + call <vscale x 16 x half> @llvm.vp.rint.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef) + ret void +} + define void @vp_nearbyint() { ; CHECK-LABEL: 'vp_nearbyint' ; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %1 = call <2 x float> @llvm.vp.nearbyint.v2f32(<2 x float> undef, <2 x i1> undef, i32 undef) @@ -693,315 +1250,39 @@ define void @vp_nearbyint() { ret void } -declare float @llvm.floor.f32(float) -declare <2 x float> @llvm.floor.v2f32(<2 x float>) -declare <4 x float> @llvm.floor.v4f32(<4 x float>) -declare <8 x float> @llvm.floor.v8f32(<8 x float>) -declare <16 x float> @llvm.floor.v16f32(<16 x float>) -declare <vscale x 1 x float> @llvm.floor.nvx1f32(<vscale x 1 x float>) -declare <vscale x 2
x float> @llvm.floor.nvx2f32(<vscale x 2 x float>) -declare <vscale x 4 x float> @llvm.floor.nvx4f32(<vscale x 4 x float>) -declare <vscale x 8 x float> @llvm.floor.nvx8f32(<vscale x 8 x float>) -declare <vscale x 16 x float> @llvm.floor.nvx16f32(<vscale x 16 x float>) -declare double @llvm.floor.f64(double) -declare <2 x double> @llvm.floor.v2f64(<2 x double>) -declare <4 x double> @llvm.floor.v4f64(<4 x double>) -declare <8 x double> @llvm.floor.v8f64(<8 x double>) -declare <16 x double> @llvm.floor.v16f64(<16 x double>) -declare <vscale x 1 x double> @llvm.floor.nvx1f64(<vscale x 1 x double>) -declare <vscale x 2 x double> @llvm.floor.nvx2f64(<vscale x 2 x double>) -declare <vscale x 4 x double> @llvm.floor.nvx4f64(<vscale x 4 x double>) -declare <vscale x 8 x double> @llvm.floor.nvx8f64(<vscale x 8 x double>) - -declare float @llvm.ceil.f32(float) -declare <2 x float> @llvm.ceil.v2f32(<2 x float>) -declare <4 x float> @llvm.ceil.v4f32(<4 x float>) -declare <8 x float> @llvm.ceil.v8f32(<8 x float>) -declare <16 x float> @llvm.ceil.v16f32(<16 x float>) -declare <vscale x 1 x float> @llvm.ceil.nvx1f32(<vscale x 1 x float>) -declare <vscale x 2 x float> @llvm.ceil.nvx2f32(<vscale x 2 x float>) -declare <vscale x 4 x float> @llvm.ceil.nvx4f32(<vscale x 4 x float>) -declare <vscale x 8 x float> @llvm.ceil.nvx8f32(<vscale x 8 x float>) -declare <vscale x 16 x float> @llvm.ceil.nvx16f32(<vscale x 16 x float>) -declare double @llvm.ceil.f64(double) -declare <2 x double> @llvm.ceil.v2f64(<2 x double>) -declare <4 x double> @llvm.ceil.v4f64(<4 x double>) -declare <8 x double> @llvm.ceil.v8f64(<8 x double>) -declare <16 x double> @llvm.ceil.v16f64(<16 x double>) -declare <vscale x 1 x double> @llvm.ceil.nvx1f64(<vscale x 1 x double>) -declare <vscale x 2 x double> @llvm.ceil.nvx2f64(<vscale x 2 x double>) -declare <vscale x 4 x double> @llvm.ceil.nvx4f64(<vscale x 4 x double>) -declare <vscale x 8 x double> @llvm.ceil.nvx8f64(<vscale x 8 x double>) - -declare float @llvm.trunc.f32(float) -declare <2 x float> @llvm.trunc.v2f32(<2 x float>) -declare <4 x float> @llvm.trunc.v4f32(<4 x float>) -declare <8 x float> @llvm.trunc.v8f32(<8 x float>) -declare <16 x float> @llvm.trunc.v16f32(<16 x float>) -declare <vscale x 1 x float> @llvm.trunc.nvx1f32(<vscale x 1 x float>) -declare <vscale x 2 x float> @llvm.trunc.nvx2f32(<vscale x 2 x float>) -declare <vscale x 4 x float> @llvm.trunc.nvx4f32(<vscale x 4 x float>) -declare <vscale x 8 x float> @llvm.trunc.nvx8f32(<vscale x 8 x float>) -declare <vscale x 16 x float> @llvm.trunc.nvx16f32(<vscale x 16 x float>) -declare double @llvm.trunc.f64(double) -declare <2 x double> @llvm.trunc.v2f64(<2 x double>) -declare <4 x double> @llvm.trunc.v4f64(<4 x double>) -declare <8 x double> @llvm.trunc.v8f64(<8 x double>) -declare <16 x double> @llvm.trunc.v16f64(<16 x double>) -declare <vscale x 1 x double> @llvm.trunc.nvx1f64(<vscale x 1 x double>) -declare <vscale x 2 x double> @llvm.trunc.nvx2f64(<vscale x 2 x double>) -declare <vscale x 4 x double> @llvm.trunc.nvx4f64(<vscale x 4 x double>) -declare <vscale x 8 x double> @llvm.trunc.nvx8f64(<vscale x 8 x double>) - -declare float @llvm.rint.f32(float) -declare <2 x float> @llvm.rint.v2f32(<2 x float>) -declare <4 x float> @llvm.rint.v4f32(<4 x float>) -declare <8 x float> @llvm.rint.v8f32(<8 x float>) -declare <16 x float> @llvm.rint.v16f32(<16 x float>) -declare <vscale x 1 x float> @llvm.rint.nvx1f32(<vscale x 1 x float>) -declare <vscale x 2 x float> @llvm.rint.nvx2f32(<vscale x 2 x float>) -declare <vscale x 4 x 
float> @llvm.rint.nvx4f32(<vscale x 4 x float>) -declare <vscale x 8 x float> @llvm.rint.nvx8f32(<vscale x 8 x float>) -declare <vscale x 16 x float> @llvm.rint.nvx16f32(<vscale x 16 x float>) -declare double @llvm.rint.f64(double) -declare <2 x double> @llvm.rint.v2f64(<2 x double>) -declare <4 x double> @llvm.rint.v4f64(<4 x double>) -declare <8 x double> @llvm.rint.v8f64(<8 x double>) -declare <16 x double> @llvm.rint.v16f64(<16 x double>) -declare <vscale x 1 x double> @llvm.rint.nvx1f64(<vscale x 1 x double>) -declare <vscale x 2 x double> @llvm.rint.nvx2f64(<vscale x 2 x double>) -declare <vscale x 4 x double> @llvm.rint.nvx4f64(<vscale x 4 x double>) -declare <vscale x 8 x double> @llvm.rint.nvx8f64(<vscale x 8 x double>) - -declare i64 @llvm.lrint.i64.f32(float) -declare <2 x i64> @llvm.lrint.v2i64.v2f32(<2 x float>) -declare <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float>) -declare <8 x i64> @llvm.lrint.v8i64.v8f32(<8 x float>) -declare <16 x i64> @llvm.lrint.v16i64.v16f32(<16 x float>) -declare <vscale x 1 x i64> @llvm.lrint.nvx1i64.nvx1f32(<vscale x 1 x float>) -declare <vscale x 2 x i64> @llvm.lrint.nvx2i64.nvx2f32(<vscale x 2 x float>) -declare <vscale x 4 x i64> @llvm.lrint.nvx4i64.nvx4f32(<vscale x 4 x float>) -declare <vscale x 8 x i64> @llvm.lrint.nvx8i64.nvx8f32(<vscale x 8 x float>) -declare <vscale x 16 x i64> @llvm.lrint.nvx16i64.nvx16f32(<vscale x 16 x float>) -declare i64 @llvm.lrint.i64.f64(double) -declare <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double>) -declare <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double>) -declare <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double>) -declare <16 x i64> @llvm.lrint.v16i64.v16f64(<16 x double>) -declare <vscale x 1 x i64> @llvm.lrint.nvx1i64.nvx1f64(<vscale x 1 x double>) -declare <vscale x 2 x i64> @llvm.lrint.nvx2i64.nvx2f64(<vscale x 2 x double>) -declare <vscale x 4 x i64> @llvm.lrint.nvx4i64.nvx4f64(<vscale x 4 x double>) -declare <vscale x 8 x i64> @llvm.lrint.nvx8i64.nvx8f64(<vscale x 8 x double>) - -declare i64 @llvm.llrint.i64.f32(float) -declare <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float>) -declare <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float>) -declare <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float>) -declare <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float>) -declare <vscale x 1 x i64> @llvm.llrint.nvx1i64.nvx1f32(<vscale x 1 x float>) -declare <vscale x 2 x i64> @llvm.llrint.nvx2i64.nvx2f32(<vscale x 2 x float>) -declare <vscale x 4 x i64> @llvm.llrint.nvx4i64.nvx4f32(<vscale x 4 x float>) -declare <vscale x 8 x i64> @llvm.llrint.nvx8i64.nvx8f32(<vscale x 8 x float>) -declare <vscale x 16 x i64> @llvm.llrint.nvx16i64.nvx16f32(<vscale x 16 x float>) -declare i64 @llvm.llrint.i64.f64(double) -declare <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double>) -declare <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double>) -declare <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double>) -declare <16 x i64> @llvm.llrint.v16i64.v16f64(<16 x double>) -declare <vscale x 1 x i64> @llvm.llrint.nvx1i64.nvx1f64(<vscale x 1 x double>) -declare <vscale x 2 x i64> @llvm.llrint.nvx2i64.nvx2f64(<vscale x 2 x double>) -declare <vscale x 4 x i64> @llvm.llrint.nvx4i64.nvx4f64(<vscale x 4 x double>) -declare <vscale x 8 x i64> @llvm.llrint.nvx8i64.nvx8f64(<vscale x 8 x double>) - -declare float @llvm.nearbyint.f32(float) -declare <2 x float> @llvm.nearbyint.v2f32(<2 x float>) -declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>) -declare <8 x float> @llvm.nearbyint.v8f32(<8 x float>) -declare <16 x float> @llvm.nearbyint.v16f32(<16 x float>) -declare <vscale x 
1 x float> @llvm.nearbyint.nvx1f32(<vscale x 1 x float>)
-declare <vscale x 2 x float> @llvm.nearbyint.nvx2f32(<vscale x 2 x float>)
-declare <vscale x 4 x float> @llvm.nearbyint.nvx4f32(<vscale x 4 x float>)
-declare <vscale x 8 x float> @llvm.nearbyint.nvx8f32(<vscale x 8 x float>)
-declare <vscale x 16 x float> @llvm.nearbyint.nvx16f32(<vscale x 16 x float>)
-declare double @llvm.nearbyint.f64(double)
-declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>)
-declare <4 x double> @llvm.nearbyint.v4f64(<4 x double>)
-declare <8 x double> @llvm.nearbyint.v8f64(<8 x double>)
-declare <16 x double> @llvm.nearbyint.v16f64(<16 x double>)
-declare <vscale x 1 x double> @llvm.nearbyint.nvx1f64(<vscale x 1 x double>)
-declare <vscale x 2 x double> @llvm.nearbyint.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.nearbyint.nvx4f64(<vscale x 4 x double>)
-declare <vscale x 8 x double> @llvm.nearbyint.nvx8f64(<vscale x 8 x double>)
-
-declare float @llvm.round.f32(float)
-declare <2 x float> @llvm.round.v2f32(<2 x float>)
-declare <4 x float> @llvm.round.v4f32(<4 x float>)
-declare <8 x float> @llvm.round.v8f32(<8 x float>)
-declare <16 x float> @llvm.round.v16f32(<16 x float>)
-declare <vscale x 1 x float> @llvm.round.nvx1f32(<vscale x 1 x float>)
-declare <vscale x 2 x float> @llvm.round.nvx2f32(<vscale x 2 x float>)
-declare <vscale x 4 x float> @llvm.round.nvx4f32(<vscale x 4 x float>)
-declare <vscale x 8 x float> @llvm.round.nvx8f32(<vscale x 8 x float>)
-declare <vscale x 16 x float> @llvm.round.nvx16f32(<vscale x 16 x float>)
-declare double @llvm.round.f64(double)
-declare <2 x double> @llvm.round.v2f64(<2 x double>)
-declare <4 x double> @llvm.round.v4f64(<4 x double>)
-declare <8 x double> @llvm.round.v8f64(<8 x double>)
-declare <16 x double> @llvm.round.v16f64(<16 x double>)
-declare <vscale x 1 x double> @llvm.round.nvx1f64(<vscale x 1 x double>)
-declare <vscale x 2 x double> @llvm.round.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.round.nvx4f64(<vscale x 4 x double>)
-declare <vscale x 8 x double> @llvm.round.nvx8f64(<vscale x 8 x double>)
-
-declare float @llvm.roundeven.f32(float)
-declare <2 x float> @llvm.roundeven.v2f32(<2 x float>)
-declare <4 x float> @llvm.roundeven.v4f32(<4 x float>)
-declare <8 x float> @llvm.roundeven.v8f32(<8 x float>)
-declare <16 x float> @llvm.roundeven.v16f32(<16 x float>)
-declare <vscale x 1 x float> @llvm.roundeven.nvx1f32(<vscale x 1 x float>)
-declare <vscale x 2 x float> @llvm.roundeven.nvx2f32(<vscale x 2 x float>)
-declare <vscale x 4 x float> @llvm.roundeven.nvx4f32(<vscale x 4 x float>)
-declare <vscale x 8 x float> @llvm.roundeven.nvx8f32(<vscale x 8 x float>)
-declare <vscale x 16 x float> @llvm.roundeven.nvx16f32(<vscale x 16 x float>)
-declare double @llvm.roundeven.f64(double)
-declare <2 x double> @llvm.roundeven.v2f64(<2 x double>)
-declare <4 x double> @llvm.roundeven.v4f64(<4 x double>)
-declare <8 x double> @llvm.roundeven.v8f64(<8 x double>)
-declare <16 x double> @llvm.roundeven.v16f64(<16 x double>)
-declare <vscale x 1 x double> @llvm.roundeven.nvx1f64(<vscale x 1 x double>)
-declare <vscale x 2 x double> @llvm.roundeven.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.roundeven.nvx4f64(<vscale x 4 x double>)
-declare <vscale x 8 x double> @llvm.roundeven.nvx8f64(<vscale x 8 x double>)
-
-declare <2 x float> @llvm.vp.ceil.v2f32(<2 x float>, <2 x i1>, i32)
-declare <4 x float> @llvm.vp.ceil.v4f32(<4 x float>, <4 x i1>, i32)
-declare <8 x float> @llvm.vp.ceil.v8f32(<8 x float>, <8 x i1>, i32)
-declare <16 x float> @llvm.vp.ceil.v16f32(<16 x float>, <16 x i1>, i32)
-declare <vscale x 1 x float> @llvm.vp.ceil.nvx1f32(<vscale x 1 x float>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x float> @llvm.vp.ceil.nvx2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x float> @llvm.vp.ceil.nvx4f32(<vscale x 4 x float>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x float> @llvm.vp.ceil.nvx8f32(<vscale x 8 x float>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x float> @llvm.vp.ceil.nvx16f32(<vscale x 16 x float>, <vscale x 16 x i1>, i32)
-declare double @llvm.vp.ceil.f64(double)
-declare <2 x double> @llvm.vp.ceil.v2f64(<2 x double>, <2 x i1>, i32)
-declare <4 x double> @llvm.vp.ceil.v4f64(<4 x double>, <4 x i1>, i32)
-declare <8 x double> @llvm.vp.ceil.v8f64(<8 x double>, <8 x i1>, i32)
-declare <16 x double> @llvm.vp.ceil.v16f64(<16 x double>, <16 x i1>, i32)
-declare <vscale x 1 x double> @llvm.vp.ceil.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x double> @llvm.vp.ceil.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.ceil.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x double> @llvm.vp.ceil.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
-
-declare <2 x float> @llvm.vp.floor.v2f32(<2 x float>, <2 x i1>, i32)
-declare <4 x float> @llvm.vp.floor.v4f32(<4 x float>, <4 x i1>, i32)
-declare <8 x float> @llvm.vp.floor.v8f32(<8 x float>, <8 x i1>, i32)
-declare <16 x float> @llvm.vp.floor.v16f32(<16 x float>, <16 x i1>, i32)
-declare <vscale x 1 x float> @llvm.vp.floor.nvx1f32(<vscale x 1 x float>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x float> @llvm.vp.floor.nvx2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x float> @llvm.vp.floor.nvx4f32(<vscale x 4 x float>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x float> @llvm.vp.floor.nvx8f32(<vscale x 8 x float>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x float> @llvm.vp.floor.nvx16f32(<vscale x 16 x float>, <vscale x 16 x i1>, i32)
-declare double @llvm.vp.floor.f64(double)
-declare <2 x double> @llvm.vp.floor.v2f64(<2 x double>, <2 x i1>, i32)
-declare <4 x double> @llvm.vp.floor.v4f64(<4 x double>, <4 x i1>, i32)
-declare <8 x double> @llvm.vp.floor.v8f64(<8 x double>, <8 x i1>, i32)
-declare <16 x double> @llvm.vp.floor.v16f64(<16 x double>, <16 x i1>, i32)
-declare <vscale x 1 x double> @llvm.vp.floor.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x double> @llvm.vp.floor.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.floor.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x double> @llvm.vp.floor.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
-
-declare <2 x float> @llvm.vp.round.v2f32(<2 x float>, <2 x i1>, i32)
-declare <4 x float> @llvm.vp.round.v4f32(<4 x float>, <4 x i1>, i32)
-declare <8 x float> @llvm.vp.round.v8f32(<8 x float>, <8 x i1>, i32)
-declare <16 x float> @llvm.vp.round.v16f32(<16 x float>, <16 x i1>, i32)
-declare <vscale x 1 x float> @llvm.vp.round.nvx1f32(<vscale x 1 x float>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x float> @llvm.vp.round.nvx2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x float> @llvm.vp.round.nvx4f32(<vscale x 4 x float>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x float> @llvm.vp.round.nvx8f32(<vscale x 8 x float>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x float> @llvm.vp.round.nvx16f32(<vscale x 16 x float>, <vscale x 16 x i1>, i32)
-declare double @llvm.vp.round.f64(double)
-declare <2 x double> @llvm.vp.round.v2f64(<2 x double>, <2 x i1>, i32)
-declare <4 x double> @llvm.vp.round.v4f64(<4 x double>, <4 x i1>, i32)
-declare <8 x double> @llvm.vp.round.v8f64(<8 x double>, <8 x i1>, i32)
-declare <16 x double> @llvm.vp.round.v16f64(<16 x double>, <16 x i1>, i32)
-declare <vscale x 1 x double> @llvm.vp.round.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x double> @llvm.vp.round.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.round.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x double> @llvm.vp.round.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
-
-declare <2 x float> @llvm.vp.roundeven.v2f32(<2 x float>, <2 x i1>, i32)
-declare <4 x float> @llvm.vp.roundeven.v4f32(<4 x float>, <4 x i1>, i32)
-declare <8 x float> @llvm.vp.roundeven.v8f32(<8 x float>, <8 x i1>, i32)
-declare <16 x float> @llvm.vp.roundeven.v16f32(<16 x float>, <16 x i1>, i32)
-declare <vscale x 1 x float> @llvm.vp.roundeven.nvx1f32(<vscale x 1 x float>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x float> @llvm.vp.roundeven.nvx2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x float> @llvm.vp.roundeven.nvx4f32(<vscale x 4 x float>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x float> @llvm.vp.roundeven.nvx8f32(<vscale x 8 x float>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x float> @llvm.vp.roundeven.nvx16f32(<vscale x 16 x float>, <vscale x 16 x i1>, i32)
-declare double @llvm.vp.roundeven.f64(double)
-declare <2 x double> @llvm.vp.roundeven.v2f64(<2 x double>, <2 x i1>, i32)
-declare <4 x double> @llvm.vp.roundeven.v4f64(<4 x double>, <4 x i1>, i32)
-declare <8 x double> @llvm.vp.roundeven.v8f64(<8 x double>, <8 x i1>, i32)
-declare <16 x double> @llvm.vp.roundeven.v16f64(<16 x double>, <16 x i1>, i32)
-declare <vscale x 1 x double> @llvm.vp.roundeven.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x double> @llvm.vp.roundeven.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.roundeven.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x double> @llvm.vp.roundeven.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
-
-declare <2 x float> @llvm.vp.roundtozero.v2f32(<2 x float>, <2 x i1>, i32)
-declare <4 x float> @llvm.vp.roundtozero.v4f32(<4 x float>, <4 x i1>, i32)
-declare <8 x float> @llvm.vp.roundtozero.v8f32(<8 x float>, <8 x i1>, i32)
-declare <16 x float> @llvm.vp.roundtozero.v16f32(<16 x float>, <16 x i1>, i32)
-declare <vscale x 1 x float> @llvm.vp.roundtozero.nvx1f32(<vscale x 1 x float>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x float> @llvm.vp.roundtozero.nvx2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x float> @llvm.vp.roundtozero.nvx4f32(<vscale x 4 x float>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x float> @llvm.vp.roundtozero.nvx8f32(<vscale x 8 x float>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x float> @llvm.vp.roundtozero.nvx16f32(<vscale x 16 x float>, <vscale x 16 x i1>, i32)
-declare double @llvm.vp.roundtozero.f64(double)
-declare <2 x double> @llvm.vp.roundtozero.v2f64(<2 x double>, <2 x i1>, i32)
-declare <4 x double> @llvm.vp.roundtozero.v4f64(<4 x double>, <4 x i1>, i32)
-declare <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double>, <8 x i1>, i32)
-declare <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double>, <16 x i1>, i32)
-declare <vscale x 1 x double> @llvm.vp.roundtozero.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x double> @llvm.vp.roundtozero.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.roundtozero.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x double> @llvm.vp.roundtozero.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
-
-declare <2 x float> @llvm.vp.rint.v2f32(<2 x float>, <2 x i1>, i32)
-declare <4 x float> @llvm.vp.rint.v4f32(<4 x float>, <4 x i1>, i32)
-declare <8 x float> @llvm.vp.rint.v8f32(<8 x float>, <8 x i1>, i32)
-declare <16 x float> @llvm.vp.rint.v16f32(<16 x float>, <16 x i1>, i32)
-declare <vscale x 1 x float> @llvm.vp.rint.nvx1f32(<vscale x 1 x float>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x float> @llvm.vp.rint.nvx2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x float> @llvm.vp.rint.nvx4f32(<vscale x 4 x float>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x float> @llvm.vp.rint.nvx8f32(<vscale x 8 x float>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x float> @llvm.vp.rint.nvx16f32(<vscale x 16 x float>, <vscale x 16 x i1>, i32)
-declare double @llvm.vp.rint.f64(double)
-declare <2 x double> @llvm.vp.rint.v2f64(<2 x double>, <2 x i1>, i32)
-declare <4 x double> @llvm.vp.rint.v4f64(<4 x double>, <4 x i1>, i32)
-declare <8 x double> @llvm.vp.rint.v8f64(<8 x double>, <8 x i1>, i32)
-declare <16 x double> @llvm.vp.rint.v16f64(<16 x double>, <16 x i1>, i32)
-declare <vscale x 1 x double> @llvm.vp.rint.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x double> @llvm.vp.rint.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.rint.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x double> @llvm.vp.rint.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
-
-declare <2 x float> @llvm.vp.nearbyint.v2f32(<2 x float>, <2 x i1>, i32)
-declare <4 x float> @llvm.vp.nearbyint.v4f32(<4 x float>, <4 x i1>, i32)
-declare <8 x float> @llvm.vp.nearbyint.v8f32(<8 x float>, <8 x i1>, i32)
-declare <16 x float> @llvm.vp.nearbyint.v16f32(<16 x float>, <16 x i1>, i32)
-declare <vscale x 1 x float> @llvm.vp.nearbyint.nvx1f32(<vscale x 1 x float>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x float> @llvm.vp.nearbyint.nvx2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x float> @llvm.vp.nearbyint.nvx4f32(<vscale x 4 x float>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x float> @llvm.vp.nearbyint.nvx8f32(<vscale x 8 x float>, <vscale x 8 x i1>, i32)
-declare <vscale x 16 x float> @llvm.vp.nearbyint.nvx16f32(<vscale x 16 x float>, <vscale x 16 x i1>, i32)
-declare double @llvm.vp.nearbyint.f64(double)
-declare <2 x double> @llvm.vp.nearbyint.v2f64(<2 x double>, <2 x i1>, i32)
-declare <4 x double> @llvm.vp.nearbyint.v4f64(<4 x double>, <4 x i1>, i32)
-declare <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double>, <8 x i1>, i32)
-declare <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double>, <16 x i1>, i32)
-declare <vscale x 1 x double> @llvm.vp.nearbyint.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
-declare <vscale x 2 x double> @llvm.vp.nearbyint.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.nearbyint.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
-declare <vscale x 8 x double> @llvm.vp.nearbyint.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
+define void @vp_nearbyint_f16() {
+; ZVFH-LABEL: 'vp_nearbyint_f16'
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %1 = call <2 x half> @llvm.vp.nearbyint.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %2 = call <4 x half> @llvm.vp.nearbyint.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %3 = call <8 x half> @llvm.vp.nearbyint.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %4 = call <16 x half> @llvm.vp.nearbyint.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %5 = call <vscale x 1 x half> @llvm.vp.nearbyint.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %6 = call <vscale x 2 x half> @llvm.vp.nearbyint.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %7 = call <vscale x 4 x half> @llvm.vp.nearbyint.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %8 = call <vscale x 8 x half> @llvm.vp.nearbyint.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %9 = call <vscale x 16 x half> @llvm.vp.nearbyint.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; ZVFHMIN-LABEL: 'vp_nearbyint_f16'
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call <2 x half> @llvm.vp.nearbyint.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <4 x half> @llvm.vp.nearbyint.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <8 x half> @llvm.vp.nearbyint.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <16 x half> @llvm.vp.nearbyint.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <vscale x 1 x half> @llvm.vp.nearbyint.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x half> @llvm.vp.nearbyint.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x half> @llvm.vp.nearbyint.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x half> @llvm.vp.nearbyint.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 16 x half> @llvm.vp.nearbyint.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ call <2 x half> @llvm.vp.nearbyint.v2f16(<2 x half> undef, <2 x i1> undef, i32 undef)
+ call <4 x half> @llvm.vp.nearbyint.v4f16(<4 x half> undef, <4 x i1> undef, i32 undef)
+ call <8 x half> @llvm.vp.nearbyint.v8f16(<8 x half> undef, <8 x i1> undef, i32 undef)
+ call <16 x half> @llvm.vp.nearbyint.v16f16(<16 x half> undef, <16 x i1> undef, i32 undef)
+ call <vscale x 1 x half> @llvm.vp.nearbyint.nvx1f16(<vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef)
+ call <vscale x 2 x half> @llvm.vp.nearbyint.nvx2f16(<vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef)
+ call <vscale x 4 x half> @llvm.vp.nearbyint.nvx4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef)
+ call <vscale x 8 x half> @llvm.vp.nearbyint.nvx8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef)
+ call <vscale x 16 x half> @llvm.vp.nearbyint.nvx16f16(<vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef)
+ ret void
+}
diff --git a/llvm/test/Analysis/CostModel/RISCV/scalable-gather.ll b/llvm/test/Analysis/CostModel/RISCV/scalable-gather.ll
index e68fcf6..2f35f72 100644
--- a/llvm/test/Analysis/CostModel/RISCV/scalable-gather.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/scalable-gather.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,GENERIC
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,GENERIC,GENERICZVFH
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,GENERIC,GENERICZVFHMIN
 ; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=256 < %s | FileCheck %s --check-prefixes=CHECK,MAX256
 ; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 < %s | FileCheck %s --check-prefixes=CHECK,UNSUPPORTED
 ; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+zve32f,+zvl128b,+f,+d,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,UNSUPPORTED
@@ -15,12 +16,6 @@ define void @masked_gather_aligned() {
 ; GENERIC-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F32 = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> undef, i32 4, <vscale x 4 x i1> undef, <vscale x 4 x float> undef)
 ; GENERIC-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F32 = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> undef, i32 4, <vscale x 2 x i1> undef, <vscale x 2 x float> undef)
 ; GENERIC-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = call <vscale x 1 x float> @llvm.masked.gather.nxv1f32.nxv1p0(<vscale x 1 x ptr> undef, i32 4, <vscale x 1 x i1> undef, <vscale x 1 x float> undef)
-; GENERIC-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32F16 = call <vscale x 32 x half> @llvm.masked.gather.nxv32f16.nxv32p0(<vscale x 32 x ptr> undef, i32 2, <vscale x 32 x i1> undef, <vscale x 32 x half> undef)
-; GENERIC-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16F16 = call <vscale x 16 x half> @llvm.masked.gather.nxv16f16.nxv16p0(<vscale x 16 x ptr> undef, i32 2, <vscale x 16 x i1> undef, <vscale x 16 x half> undef)
-; GENERIC-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8F16 = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> undef, i32 2, <vscale x 8 x i1> undef, <vscale x 8 x half> undef)
-; GENERIC-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F16 = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> undef, i32 2, <vscale x 4 x i1> undef, <vscale x 4 x half> undef)
-; GENERIC-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F16 = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16.nxv2p0(<vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef, <vscale x 2 x half> undef)
-; GENERIC-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = call <vscale x 1 x half> @llvm.masked.gather.nxv1f16.nxv1p0(<vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef, <vscale x 1 x half> undef)
 ; GENERIC-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> undef, i32 8, <vscale x 8 x i1> undef, <vscale x 8 x i64> undef)
 ; GENERIC-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> undef, i32 8, <vscale x 4 x i1> undef, <vscale x 4 x i64> undef)
 ; GENERIC-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> undef, i32 8, <vscale x 2 x i1> undef, <vscale x 2 x i64> undef)
@@ -59,12 +54,6 @@ define void @masked_gather_aligned() {
 ; MAX256-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F32 = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> undef, i32 4, <vscale x 4 x i1> undef, <vscale x 4 x float> undef)
 ; MAX256-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F32 = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> undef, i32 4, <vscale x 2 x i1> undef, <vscale x 2 x float> undef)
 ; MAX256-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = call <vscale x 1 x float> @llvm.masked.gather.nxv1f32.nxv1p0(<vscale x 1 x ptr> undef, i32 4, <vscale x 1 x i1> undef, <vscale x 1 x float> undef)
-; MAX256-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32F16 = call <vscale x 32 x half> @llvm.masked.gather.nxv32f16.nxv32p0(<vscale x 32 x ptr> undef, i32 2, <vscale x 32 x i1> undef, <vscale x 32 x half> undef)
-; MAX256-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16F16 = call <vscale x 16 x half> @llvm.masked.gather.nxv16f16.nxv16p0(<vscale x 16 x ptr> undef, i32 2, <vscale x 16 x i1> undef, <vscale x 16 x half> undef)
-; MAX256-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8F16 = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> undef, i32 2, <vscale x 8 x i1> undef, <vscale x 8 x half> undef)
-; MAX256-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F16 = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> undef, i32 2, <vscale x 4 x i1> undef, <vscale x 4 x half> undef)
-; MAX256-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F16 = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16.nxv2p0(<vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef, <vscale x 2 x half> undef)
-; MAX256-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = call <vscale x 1 x half> @llvm.masked.gather.nxv1f16.nxv1p0(<vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef, <vscale x 1 x half> undef)
 ; MAX256-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8I64 = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> undef, i32 8, <vscale x 8 x i1> undef, <vscale x 8 x i64> undef)
 ; MAX256-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4I64 = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> undef, i32 8, <vscale x 4 x i1> undef, <vscale x 4 x i64> undef)
 ; MAX256-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2I64 = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> undef, i32 8, <vscale x 2 x i1> undef, <vscale x 2 x i64> undef)
@@ -103,12 +92,6 @@ define void @masked_gather_aligned() {
 ; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: %V4F32 = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> undef, i32 4, <vscale x 4 x i1> undef, <vscale x 4 x float> undef)
 ; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: %V2F32 = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> undef, i32 4, <vscale x 2 x i1> undef, <vscale x 2 x float> undef)
 ; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: %V1F32 = call <vscale x 1 x float> @llvm.masked.gather.nxv1f32.nxv1p0(<vscale x 1 x ptr> undef, i32 4, <vscale x 1 x i1> undef, <vscale x 1 x float> undef)
-; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: %V32F16 = call <vscale x 32 x half> @llvm.masked.gather.nxv32f16.nxv32p0(<vscale x 32 x ptr> undef, i32 2, <vscale x 32 x i1> undef, <vscale x 32 x half> undef)
-; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: %V16F16 = call <vscale x 16 x half> @llvm.masked.gather.nxv16f16.nxv16p0(<vscale x 16 x ptr> undef, i32 2, <vscale x 16 x i1> undef, <vscale x 16 x half> undef)
-; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: %V8F16 = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> undef, i32 2, <vscale x 8 x i1> undef, <vscale x 8 x half> undef)
-; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: %V4F16 = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> undef, i32 2, <vscale x 4 x i1> undef, <vscale x 4 x half> undef)
-; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: %V2F16 = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16.nxv2p0(<vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef, <vscale x 2 x half> undef)
-; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: %V1F16 = call <vscale x 1 x half> @llvm.masked.gather.nxv1f16.nxv1p0(<vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef, <vscale x 1 x half> undef)
 ; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: %V8I64 = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> undef, i32 8, <vscale x 8 x i1> undef, <vscale x 8 x i64> undef)
 ; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: %V4I64 = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> undef, i32 8, <vscale x 4 x i1> undef, <vscale x 4 x i64> undef)
 ; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: %V2I64 = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> undef, i32 8, <vscale x 2 x i1> undef, <vscale x 2 x i64> undef)
@@ -148,13 +131,6 @@ define void @masked_gather_aligned() {
  %V2F32 = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> undef, i32 4, <vscale x 2 x i1> undef, <vscale x 2 x float> undef)
  %V1F32 = call <vscale x 1 x float> @llvm.masked.gather.nxv1f32.nxv1p0(<vscale x 1 x ptr> undef, i32 4, <vscale x 1 x i1> undef, <vscale x 1 x float> undef)
- %V32F16 = call <vscale x 32 x half> @llvm.masked.gather.nxv32f16.nxv32p0(<vscale x 32 x ptr> undef, i32 2, <vscale x 32 x i1> undef, <vscale x 32 x half> undef)
- %V16F16 = call <vscale x 16 x half> @llvm.masked.gather.nxv16f16.nxv16p0(<vscale x 16 x ptr> undef, i32 2, <vscale x 16 x i1> undef, <vscale x 16 x half> undef)
- %V8F16 = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> undef, i32 2, <vscale x 8 x i1> undef, <vscale x 8 x half> undef)
- %V4F16 = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> undef, i32 2, <vscale x 4 x i1> undef, <vscale x 4 x half> undef)
- %V2F16 = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16.nxv2p0(<vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef, <vscale x 2 x half> undef)
- %V1F16 = call <vscale x 1 x half> @llvm.masked.gather.nxv1f16.nxv1p0(<vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef, <vscale x 1 x half> undef)
-
  %V8I64 = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> undef, i32 8, <vscale x 8 x i1> undef, <vscale x 8 x i64> undef)
  %V4I64 = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> undef, i32 8, <vscale x 4 x i1> undef, <vscale x 4 x i64> undef)
  %V2I64 = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> undef, i32 8, <vscale x 2 x i1> undef, <vscale x 2 x i64> undef)
@@ -189,6 +165,53 @@ define void @masked_gather_aligned() {
  ret void
 }

+define void @masked_gather_aligned_f16() {
+; GENERICZVFH-LABEL: 'masked_gather_aligned_f16'
+; GENERICZVFH-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32F16 = call <vscale x 32 x half> @llvm.masked.gather.nxv32f16.nxv32p0(<vscale x 32 x ptr> undef, i32 2, <vscale x 32 x i1> undef, <vscale x 32 x half> undef)
+; GENERICZVFH-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16F16 = call <vscale x 16 x half> @llvm.masked.gather.nxv16f16.nxv16p0(<vscale x 16 x ptr> undef, i32 2, <vscale x 16 x i1> undef, <vscale x 16 x half> undef)
+; GENERICZVFH-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8F16 = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> undef, i32 2, <vscale x 8 x i1> undef, <vscale x 8 x half> undef)
+; GENERICZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F16 = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> undef, i32 2, <vscale x 4 x i1> undef, <vscale x 4 x half> undef)
+; GENERICZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F16 = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16.nxv2p0(<vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef, <vscale x 2 x half> undef)
+; GENERICZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = call <vscale x 1 x half> @llvm.masked.gather.nxv1f16.nxv1p0(<vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef, <vscale x 1 x half> undef)
+; GENERICZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; GENERICZVFHMIN-LABEL: 'masked_gather_aligned_f16'
+; GENERICZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %V32F16 = call <vscale x 32 x half> @llvm.masked.gather.nxv32f16.nxv32p0(<vscale x 32 x ptr> undef, i32 2, <vscale x 32 x i1> undef, <vscale x 32 x half> undef)
+; GENERICZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %V16F16 = call <vscale x 16 x half> @llvm.masked.gather.nxv16f16.nxv16p0(<vscale x 16 x ptr> undef, i32 2, <vscale x 16 x i1> undef, <vscale x 16 x half> undef)
+; GENERICZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %V8F16 = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> undef, i32 2, <vscale x 8 x i1> undef, <vscale x 8 x half> undef)
+; GENERICZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %V4F16 = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> undef, i32 2, <vscale x 4 x i1> undef, <vscale x 4 x half> undef)
+; GENERICZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %V2F16 = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16.nxv2p0(<vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef, <vscale x 2 x half> undef)
+; GENERICZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %V1F16 = call <vscale x 1 x half> @llvm.masked.gather.nxv1f16.nxv1p0(<vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef, <vscale x 1 x half> undef)
+; GENERICZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; MAX256-LABEL: 'masked_gather_aligned_f16'
+; MAX256-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32F16 = call <vscale x 32 x half> @llvm.masked.gather.nxv32f16.nxv32p0(<vscale x 32 x ptr> undef, i32 2, <vscale x 32 x i1> undef, <vscale x 32 x half> undef)
+; MAX256-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16F16 = call <vscale x 16 x half> @llvm.masked.gather.nxv16f16.nxv16p0(<vscale x 16 x ptr> undef, i32 2, <vscale x 16 x i1> undef, <vscale x 16 x half> undef)
+; MAX256-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8F16 = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> undef, i32 2, <vscale x 8 x i1> undef, <vscale x 8 x half> undef)
+; MAX256-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F16 = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> undef, i32 2, <vscale x 4 x i1> undef, <vscale x 4 x half> undef)
+; MAX256-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F16 = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16.nxv2p0(<vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef, <vscale x 2 x half> undef)
+; MAX256-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = call <vscale x 1 x half> @llvm.masked.gather.nxv1f16.nxv1p0(<vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef, <vscale x 1 x half> undef)
+; MAX256-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; UNSUPPORTED-LABEL: 'masked_gather_aligned_f16'
+; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: %V32F16 = call <vscale x 32 x half> @llvm.masked.gather.nxv32f16.nxv32p0(<vscale x 32 x ptr> undef, i32 2, <vscale x 32 x i1> undef, <vscale x 32 x half> undef)
+; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: %V16F16 = call <vscale x 16 x half> @llvm.masked.gather.nxv16f16.nxv16p0(<vscale x 16 x ptr> undef, i32 2, <vscale x 16 x i1> undef, <vscale x 16 x half> undef)
+; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: %V8F16 = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> undef, i32 2, <vscale x 8 x i1> undef, <vscale x 8 x half> undef)
+; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: %V4F16 = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> undef, i32 2, <vscale x 4 x i1> undef, <vscale x 4 x half> undef)
+; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: %V2F16 = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16.nxv2p0(<vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef, <vscale x 2 x half> undef)
+; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: %V1F16 = call <vscale x 1 x half> @llvm.masked.gather.nxv1f16.nxv1p0(<vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef, <vscale x 1 x half> undef)
+; UNSUPPORTED-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %V32F16 = call <vscale x 32 x half> @llvm.masked.gather.nxv32f16.nxv32p0(<vscale x 32 x ptr> undef, i32 2, <vscale x 32 x i1> undef, <vscale x 32 x half> undef)
+ %V16F16 = call <vscale x 16 x half> @llvm.masked.gather.nxv16f16.nxv16p0(<vscale x 16 x ptr> undef, i32 2, <vscale x 16 x i1> undef, <vscale x 16 x half> undef)
+ %V8F16 = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> undef, i32 2, <vscale x 8 x i1> undef, <vscale x 8 x half> undef)
+ %V4F16 = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> undef, i32 2, <vscale x 4 x i1> undef, <vscale x 4 x half> undef)
+ %V2F16 = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16.nxv2p0(<vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef, <vscale x 2 x half> undef)
+ %V1F16 = call <vscale x 1 x half> @llvm.masked.gather.nxv1f16.nxv1p0(<vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef, <vscale x 1 x half> undef)
+
+ ret void
+}
+
 define void @masked_gather_unaligned() {
 ; CHECK-LABEL: 'masked_gather_unaligned'
 ; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8F64.u = call <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> undef, i32 2, <vscale x 8 x i1> undef, <vscale x 8 x double> undef)
@@ -200,12 +223,6 @@ define void @masked_gather_unaligned() {
 ; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4F32.u = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> undef, i32 2, <vscale x 4 x i1> undef, <vscale x 4 x float> undef)
 ; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2F32.u = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef, <vscale x 2 x float> undef)
 ; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1F32.u = call <vscale x 1 x float> @llvm.masked.gather.nxv1f32.nxv1p0(<vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef, <vscale x 1 x float> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V32F16.u = call <vscale x 32 x half> @llvm.masked.gather.nxv32f16.nxv32p0(<vscale x 32 x ptr> undef, i32 1, <vscale x 32 x i1> undef, <vscale x 32 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V16F16.u = call <vscale x 16 x half> @llvm.masked.gather.nxv16f16.nxv16p0(<vscale x 16 x ptr> undef, i32 1, <vscale x 16 x i1> undef, <vscale x 16 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8F16.u = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> undef, i32 1, <vscale x 8 x i1> undef, <vscale x 8 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4F16.u = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> undef, i32 1, <vscale x 4 x i1> undef, <vscale x 4 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2F16.u = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16.nxv2p0(<vscale x 2 x ptr> undef, i32 1, <vscale x 2 x i1> undef, <vscale x 2 x half> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1F16.u = call <vscale x 1 x half> @llvm.masked.gather.nxv1f16.nxv1p0(<vscale x 1 x ptr> undef, i32 1, <vscale x 1 x i1> undef, <vscale x 1 x half> undef)
 ; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8I64.u = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> undef, i32 4, <vscale x 8 x i1> undef, <vscale x 8 x i64> undef)
 ; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4I64.u = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> undef, i32 4, <vscale x 4 x i1> undef, <vscale x 4 x i64> undef)
 ; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2I64.u = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> undef, i32 4, <vscale x 2 x i1> undef, <vscale x 2 x i64> undef)
@@ -238,13 +255,6 @@ define void @masked_gather_unaligned() {
  %V2F32.u = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef, <vscale x 2 x float> undef)
  %V1F32.u = call <vscale x 1 x float> @llvm.masked.gather.nxv1f32.nxv1p0(<vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef, <vscale x 1 x float> undef)
- %V32F16.u = call <vscale x 32 x half> @llvm.masked.gather.nxv32f16.nxv32p0(<vscale x 32 x ptr> undef, i32 1, <vscale x 32 x i1> undef, <vscale x 32 x half> undef)
- %V16F16.u = call <vscale x 16 x half> @llvm.masked.gather.nxv16f16.nxv16p0(<vscale x 16 x ptr> undef, i32 1, <vscale x 16 x i1> undef, <vscale x 16 x half> undef)
- %V8F16.u = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> undef, i32 1, <vscale x 8 x i1> undef, <vscale x 8 x half> undef)
- %V4F16.u = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> undef, i32 1, <vscale x 4 x i1> undef, <vscale x 4 x half> undef)
- %V2F16.u = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16.nxv2p0(<vscale x 2 x ptr> undef, i32 1, <vscale x 2 x i1> undef, <vscale x 2 x half> undef)
- %V1F16.u = call <vscale x 1 x half> @llvm.masked.gather.nxv1f16.nxv1p0(<vscale x 1 x ptr> undef, i32 1, <vscale x 1 x i1> undef, <vscale x 1 x half> undef)
-
  %V8I64.u = call <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> undef, i32 4, <vscale x 8 x i1> undef, <vscale x 8 x i64> undef)
  %V4I64.u = call <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr> undef, i32 4, <vscale x 4 x i1> undef, <vscale x 4 x i64> undef)
  %V2I64.u = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> undef, i32 4, <vscale x 2 x i1> undef, <vscale x 2 x i64> undef)
@@ -271,51 +281,22 @@ define void @masked_gather_unaligned() {
  ret void
 }

-declare <vscale x 8 x double> @llvm.masked.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x double>)
-declare <vscale x 4 x double> @llvm.masked.gather.nxv4f64.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x double>)
-declare <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
-declare <vscale x 1 x double> @llvm.masked.gather.nxv1f64.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x double>)
-
-declare <vscale x 16 x float> @llvm.masked.gather.nxv16f32.nxv16p0(<vscale x 16 x ptr>, i32, <vscale x 16 x i1>, <vscale x 16 x float>)
-declare <vscale x 8 x float> @llvm.masked.gather.nxv8f32.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x float>)
-declare <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
-declare <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
-declare <vscale x 1 x float> @llvm.masked.gather.nxv1f32.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x float>)
-
-declare <vscale x 32 x half> @llvm.masked.gather.nxv32f16.nxv32p0(<vscale x 32 x ptr>, i32, <vscale x 32 x i1>, <vscale x 32 x half>)
-declare <vscale x 16 x half> @llvm.masked.gather.nxv16f16.nxv16p0(<vscale x 16 x ptr>, i32, <vscale x 16 x i1>, <vscale x 16 x half>)
-declare <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x half>)
-declare <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
-declare <vscale x 2 x half> @llvm.masked.gather.nxv2f16.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
-declare <vscale x 1 x half> @llvm.masked.gather.nxv1f16.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x half>)
-
-declare <vscale x 8 x i64> @llvm.masked.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x i64>)
-declare <vscale x 4 x i64> @llvm.masked.gather.nxv4i64.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i64>)
-declare <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
-declare <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x i64>)
-
-declare <vscale x 16 x i32> @llvm.masked.gather.nxv16i32.nxv16p0(<vscale x 16 x ptr>, i32, <vscale x 16 x i1>, <vscale x 16 x i32>)
-declare <vscale x 8 x i32> @llvm.masked.gather.nxv8i32.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x i32>)
-declare <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
-declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
-declare <vscale x 1 x i32> @llvm.masked.gather.nxv1i32.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x i32>)
-
-declare <vscale x 32 x i16> @llvm.masked.gather.nxv32i16.nxv32p0(<vscale x 32 x ptr>, i32, <vscale x 32 x i1>, <vscale x 32 x i16>)
-declare <vscale x 16 x i16> @llvm.masked.gather.nxv16i16.nxv16p0(<vscale x 16 x ptr>, i32, <vscale x 16 x i1>, <vscale x 16 x i16>)
-declare <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
-declare <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
-declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
-declare <vscale x 1 x i16> @llvm.masked.gather.nxv1i16.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x i16>)
-
-declare <vscale x 64 x i8> @llvm.masked.gather.nxv64i8.nxv64p0(<vscale x 64 x ptr>, i32, <vscale x 64 x i1>, <vscale x 64 x i8>)
-declare <vscale x 32 x i8> @llvm.masked.gather.nxv32i8.nxv32p0(<vscale x 32 x ptr>, i32, <vscale x 32 x i1>, <vscale x 32 x i8>)
-declare <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr>, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
-declare <vscale x 8 x i8> @llvm.masked.gather.nxv8i8.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)
-declare <vscale x 4 x i8> @llvm.masked.gather.nxv4i8.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
-declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
-declare <vscale x 1 x i8> @llvm.masked.gather.nxv1i8.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x i8>)
+define void @masked_gather_unaligned_f16() {
+; CHECK-LABEL: 'masked_gather_unaligned_f16'
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V32F16.u = call <vscale x 32 x half> @llvm.masked.gather.nxv32f16.nxv32p0(<vscale x 32 x ptr> undef, i32 1, <vscale x 32 x i1> undef, <vscale x 32 x half> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V16F16.u = call <vscale x 16 x half> @llvm.masked.gather.nxv16f16.nxv16p0(<vscale x 16 x ptr> undef, i32 1, <vscale x 16 x i1> undef, <vscale x 16 x half> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V8F16.u = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> undef, i32 1, <vscale x 8 x i1> undef, <vscale x 8 x half> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V4F16.u = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> undef, i32 1, <vscale x 4 x i1> undef, <vscale x 4 x half> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V2F16.u = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16.nxv2p0(<vscale x 2 x ptr> undef, i32 1, <vscale x 2 x i1> undef, <vscale x 2 x half> undef)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %V1F16.u = call <vscale x 1 x half> @llvm.masked.gather.nxv1f16.nxv1p0(<vscale x 1 x ptr> undef, i32 1, <vscale x 1 x i1> undef, <vscale x 1 x half> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %V32F16.u = call <vscale x 32 x half> @llvm.masked.gather.nxv32f16.nxv32p0(<vscale x 32 x ptr> undef, i32 1, <vscale x 32 x i1> undef, <vscale x 32 x half> undef)
+ %V16F16.u = call <vscale x 16 x half> @llvm.masked.gather.nxv16f16.nxv16p0(<vscale x 16 x ptr> undef, i32 1, <vscale x 16 x i1> undef, <vscale x 16 x half> undef)
+ %V8F16.u = call <vscale x 8 x half> @llvm.masked.gather.nxv8f16.nxv8p0(<vscale x 8 x ptr> undef, i32 1, <vscale x 8 x i1> undef, <vscale x 8 x half> undef)
+ %V4F16.u = call <vscale x 4 x half> @llvm.masked.gather.nxv4f16.nxv4p0(<vscale x 4 x ptr> undef, i32 1, <vscale x 4 x i1> undef, <vscale x 4 x half> undef)
+ %V2F16.u = call <vscale x 2 x half> @llvm.masked.gather.nxv2f16.nxv2p0(<vscale x 2 x ptr> undef, i32 1, <vscale x 2 x i1> undef, <vscale x 2 x half> undef)
+ %V1F16.u = call <vscale x 1 x half> @llvm.masked.gather.nxv1f16.nxv1p0(<vscale x 1 x ptr> undef, i32 1, <vscale x 1 x i1> undef, <vscale x 1 x half> undef)
-declare <vscale x 8 x ptr> @llvm.masked.gather.nxv8p0.nxv8p0(<vscale x 8 x ptr>, i32, <vscale x 8 x i1>, <vscale x 8 x ptr>)
-declare <vscale x 4 x ptr> @llvm.masked.gather.nxv4p0.nxv4p0(<vscale x 4 x ptr>, i32, <vscale x 4 x i1>, <vscale x 4 x ptr>)
-declare <vscale x 2 x ptr> @llvm.masked.gather.nxv2p0.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x ptr>)
-declare <vscale x 1 x ptr> @llvm.masked.gather.nxv1p0.nxv1p0(<vscale x 1 x ptr>, i32, <vscale x 1 x i1>, <vscale x 1 x ptr>)
+ ret void
+}
diff --git a/llvm/test/Analysis/CostModel/RISCV/scalable-scatter.ll b/llvm/test/Analysis/CostModel/RISCV/scalable-scatter.ll
index f7c8437..8e917a4 100644
--- a/llvm/test/Analysis/CostModel/RISCV/scalable-scatter.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/scalable-scatter.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,GENERIC
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,GENERIC,GENERICZVFH
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,GENERIC,GENERICZVFHMIN
 ; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-max=256 < %s | FileCheck %s --check-prefixes=CHECK,MAX256
 ; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 < %s | FileCheck %s --check-prefixes=CHECK,UNSUPPORTED
 ; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+zve32f,+zvl128b,+f,+d,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,UNSUPPORTED
@@ -15,12 +16,6 @@ define void @masked_scatter_aligned() {
 ; GENERIC-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> undef, <vscale x 4 x ptr> undef, i32 4, <vscale x 4 x i1> undef)
 ; GENERIC-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.masked.scatter.nxv2f32.nxv2p0(<vscale x 2 x float> undef, <vscale x 2 x ptr> undef, i32 4, <vscale x 2 x i1> undef)
 ; GENERIC-NEXT: Cost Model: Found an estimated cost of 2 for instruction: call void @llvm.masked.scatter.nxv1f32.nxv1p0(<vscale x 1 x float> undef, <vscale x 1 x ptr> undef, i32 4, <vscale x 1 x i1> undef)
-; GENERIC-NEXT: Cost Model: Found an estimated cost of 64 for instruction: call void @llvm.masked.scatter.nxv32f16.nxv32p0(<vscale x 32 x half> undef, <vscale x 32 x ptr> undef, i32 2, <vscale x 32 x i1> undef)
-; GENERIC-NEXT: Cost Model: Found an estimated cost of 32 for instruction: call void @llvm.masked.scatter.nxv16f16.nxv16p0(<vscale x 16 x half> undef, <vscale x 16 x ptr> undef, i32 2, <vscale x 16 x i1> undef)
-; GENERIC-NEXT: Cost Model: Found an estimated cost of 16 for instruction: call void @llvm.masked.scatter.nxv8f16.nxv8p0(<vscale x 8 x half> undef, <vscale x 8 x ptr> undef, i32 2, <vscale x 8 x i1> undef)
-; GENERIC-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.masked.scatter.nxv4f16.nxv4p0(<vscale x 4 x half> undef, <vscale x 4 x ptr> undef, i32 2, <vscale x 4 x i1> undef)
-; GENERIC-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.masked.scatter.nxv2f16.nxv2p0(<vscale x 2 x half> undef, <vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef)
-; GENERIC-NEXT: Cost Model: Found an estimated cost of 2 for instruction: call void @llvm.masked.scatter.nxv1f16.nxv1p0(<vscale x 1 x half> undef, <vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef)
 ; GENERIC-NEXT: Cost Model: Found an estimated cost of 16 for instruction: call void @llvm.masked.scatter.nxv8i64.nxv8p0(<vscale x 8 x i64> undef, <vscale x 8 x ptr> undef, i32 8, <vscale x 8 x i1> undef)
 ; GENERIC-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.masked.scatter.nxv4i64.nxv4p0(<vscale x 4 x i64> undef, <vscale x 4 x ptr> undef, i32 8, <vscale x 4 x i1> undef)
 ; GENERIC-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.masked.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> undef, <vscale x 2 x ptr> undef, i32 8, <vscale x 2 x i1> undef)
@@ -59,12 +54,6 @@ define void @masked_scatter_aligned() {
 ; MAX256-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> undef, <vscale x 4 x ptr> undef, i32 4, <vscale x 4 x i1> undef)
 ; MAX256-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.masked.scatter.nxv2f32.nxv2p0(<vscale x 2 x float> undef, <vscale x 2 x ptr> undef, i32 4, <vscale x 2 x i1> undef)
 ; MAX256-NEXT: Cost Model: Found an estimated cost of 2 for instruction: call void @llvm.masked.scatter.nxv1f32.nxv1p0(<vscale x 1 x float> undef, <vscale x 1 x ptr> undef, i32 4, <vscale x 1 x i1> undef)
-; MAX256-NEXT: Cost Model: Found an estimated cost of 64 for instruction: call void @llvm.masked.scatter.nxv32f16.nxv32p0(<vscale x 32 x half> undef, <vscale x 32 x ptr> undef, i32 2, <vscale x 32 x i1> undef)
-; MAX256-NEXT: Cost Model: Found an estimated cost of 32 for instruction: call void @llvm.masked.scatter.nxv16f16.nxv16p0(<vscale x 16 x half> undef, <vscale x 16 x ptr> undef, i32 2, <vscale x 16 x i1> undef)
-; MAX256-NEXT: Cost Model: Found an estimated cost of 16 for instruction: call void @llvm.masked.scatter.nxv8f16.nxv8p0(<vscale x 8 x half> undef, <vscale x 8 x ptr> undef, i32 2, <vscale x 8 x i1> undef)
-; MAX256-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.masked.scatter.nxv4f16.nxv4p0(<vscale x 4 x half> undef, <vscale x 4 x ptr> undef, i32 2, <vscale x 4 x i1> undef)
-; MAX256-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.masked.scatter.nxv2f16.nxv2p0(<vscale x 2 x half> undef, <vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef)
-; MAX256-NEXT: Cost Model: Found an estimated cost of 2 for instruction: call void @llvm.masked.scatter.nxv1f16.nxv1p0(<vscale x 1 x half> undef, <vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef)
 ; MAX256-NEXT: Cost Model: Found an estimated cost of 16 for instruction: call void @llvm.masked.scatter.nxv8i64.nxv8p0(<vscale x 8 x i64> undef, <vscale x 8 x ptr> undef, i32 8, <vscale x 8 x i1> undef)
 ; MAX256-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.masked.scatter.nxv4i64.nxv4p0(<vscale x 4 x i64> undef, <vscale x 4 x ptr> undef, i32 8, <vscale x 4 x i1> undef)
 ; MAX256-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.masked.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> undef, <vscale x 2 x ptr> undef, i32 8, <vscale x 2 x i1> undef)
@@ -103,12 +92,6 @@ define void @masked_scatter_aligned() {
 ; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> undef, <vscale x 4 x ptr> undef, i32 4, <vscale x 4 x i1> undef)
 ; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2f32.nxv2p0(<vscale x 2 x float> undef, <vscale x 2 x ptr> undef, i32 4, <vscale x 2 x i1> undef)
 ; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1f32.nxv1p0(<vscale x 1 x float> undef, <vscale x 1 x ptr> undef, i32 4, <vscale x 1 x i1> undef)
-; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv32f16.nxv32p0(<vscale x 32 x half> undef, <vscale x 32 x ptr> undef, i32 2, <vscale x 32 x i1> undef)
-; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv16f16.nxv16p0(<vscale x 16 x half> undef, <vscale x 16 x ptr> undef, i32 2, <vscale x 16 x i1> undef)
-; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv8f16.nxv8p0(<vscale x 8 x half> undef, <vscale x 8 x ptr> undef, i32 2, <vscale x 8 x i1> undef)
-; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4f16.nxv4p0(<vscale x 4 x half> undef, <vscale x 4 x ptr> undef, i32 2, <vscale x 4 x i1> undef)
-; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2f16.nxv2p0(<vscale x 2 x half> undef, <vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef)
-; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1f16.nxv1p0(<vscale x 1 x half> undef, <vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef)
 ; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv8i64.nxv8p0(<vscale x 8 x i64> undef, <vscale x 8 x ptr> undef, i32 8, <vscale x 8 x i1> undef)
 ; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4i64.nxv4p0(<vscale x 4 x i64> undef, <vscale x 4 x ptr> undef, i32 8, <vscale x 4 x i1> undef)
 ; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> undef, <vscale x 2 x ptr> undef, i32 8, <vscale x 2 x i1> undef)
@@ -148,13 +131,6 @@ define void @masked_scatter_aligned() {
  call void @llvm.masked.scatter.nxv2f32.nxv2p0(<vscale x 2 x float> undef, <vscale x 2 x ptr> undef, i32 4, <vscale x 2 x i1> undef)
  call void @llvm.masked.scatter.nxv1f32.nxv1p0(<vscale x 1 x float> undef, <vscale x 1 x ptr> undef, i32 4, <vscale x 1 x i1> undef)
- call void @llvm.masked.scatter.nxv32f16.nxv32p0(<vscale x 32 x half> undef, <vscale x 32 x ptr> undef, i32 2, <vscale x 32 x i1> undef)
- call void @llvm.masked.scatter.nxv16f16.nxv16p0(<vscale x 16 x half> undef, <vscale x 16 x ptr> undef, i32 2, <vscale x 16 x i1> undef)
- call void @llvm.masked.scatter.nxv8f16.nxv8p0(<vscale x 8 x half> undef, <vscale x 8 x ptr> undef, i32 2, <vscale x 8 x i1> undef)
- call void @llvm.masked.scatter.nxv4f16.nxv4p0(<vscale x 4 x half> undef, <vscale x 4 x ptr> undef, i32 2, <vscale x 4 x i1> undef)
- call void @llvm.masked.scatter.nxv2f16.nxv2p0(<vscale x 2 x half> undef, <vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef)
- call void @llvm.masked.scatter.nxv1f16.nxv1p0(<vscale x 1 x half> undef, <vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef)
-
  call void @llvm.masked.scatter.nxv8i64.nxv8p0(<vscale x 8 x i64> undef, <vscale x 8 x ptr> undef, i32 8, <vscale x 8 x i1> undef)
  call void @llvm.masked.scatter.nxv4i64.nxv4p0(<vscale x 4 x i64> undef, <vscale x 4 x ptr> undef, i32 8, <vscale x 4 x i1> undef)
  call void @llvm.masked.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> undef, <vscale x 2 x ptr> undef, i32 8, <vscale x 2 x i1> undef)
@@ -189,6 +165,53 @@ define void @masked_scatter_aligned() {
  ret void
 }

+define void @masked_scatter_aligned_f16() {
+; GENERICZVFH-LABEL: 'masked_scatter_aligned_f16'
+; GENERICZVFH-NEXT: Cost Model: Found an estimated cost of 64 for instruction: call void @llvm.masked.scatter.nxv32f16.nxv32p0(<vscale x 32 x half> undef, <vscale x 32 x ptr> undef, i32 2, <vscale x 32 x i1> undef)
+; GENERICZVFH-NEXT: Cost Model: Found an estimated cost of 32 for instruction: call void @llvm.masked.scatter.nxv16f16.nxv16p0(<vscale x 16 x half> undef, <vscale x 16 x ptr> undef, i32 2, <vscale x 16 x i1> undef)
+; GENERICZVFH-NEXT: Cost Model: Found an estimated cost of 16 for instruction: call void @llvm.masked.scatter.nxv8f16.nxv8p0(<vscale x 8 x half> undef, <vscale x 8 x ptr> undef, i32 2, <vscale x 8 x i1> undef)
+; GENERICZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.masked.scatter.nxv4f16.nxv4p0(<vscale x 4 x half> undef, <vscale x 4 x ptr> undef, i32 2, <vscale x 4 x i1> undef)
+; GENERICZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.masked.scatter.nxv2f16.nxv2p0(<vscale x 2 x half> undef, <vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef)
+; GENERICZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: call void @llvm.masked.scatter.nxv1f16.nxv1p0(<vscale x 1 x half> undef, <vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef)
+; GENERICZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; GENERICZVFHMIN-LABEL: 'masked_scatter_aligned_f16'
+; GENERICZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv32f16.nxv32p0(<vscale x 32 x half> undef, <vscale x 32 x ptr> undef, i32 2, <vscale x 32 x i1> undef)
+; GENERICZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv16f16.nxv16p0(<vscale x 16 x half> undef, <vscale x 16 x ptr> undef, i32 2, <vscale x 16 x i1> undef)
+; GENERICZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv8f16.nxv8p0(<vscale x 8 x half> undef, <vscale x 8 x ptr> undef, i32 2, <vscale x 8 x i1> undef)
+; GENERICZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4f16.nxv4p0(<vscale x 4 x half> undef, <vscale x 4 x ptr> undef, i32 2, <vscale x 4 x i1> undef)
+; GENERICZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2f16.nxv2p0(<vscale x 2 x half> undef, <vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef)
+; GENERICZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1f16.nxv1p0(<vscale x 1 x half> undef, <vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef)
+; GENERICZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; MAX256-LABEL: 'masked_scatter_aligned_f16'
+; MAX256-NEXT: Cost Model: Found an estimated cost of 64 for instruction: call void @llvm.masked.scatter.nxv32f16.nxv32p0(<vscale x 32 x half> undef, <vscale x 32 x ptr> undef, i32 2, <vscale x 32 x i1> undef)
+; MAX256-NEXT: Cost Model: Found an estimated cost of 32 for instruction: call void @llvm.masked.scatter.nxv16f16.nxv16p0(<vscale x 16 x half> undef, <vscale x 16 x ptr> undef, i32 2, <vscale x 16 x i1> undef)
+; MAX256-NEXT: Cost Model: Found an estimated cost of 16 for instruction: call void @llvm.masked.scatter.nxv8f16.nxv8p0(<vscale x 8 x half> undef, <vscale x 8 x ptr> undef, i32 2, <vscale x 8 x i1> undef)
+; MAX256-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.masked.scatter.nxv4f16.nxv4p0(<vscale x 4 x half> undef, <vscale x 4 x ptr> undef, i32 2, <vscale x 4 x i1> undef)
+; MAX256-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.masked.scatter.nxv2f16.nxv2p0(<vscale x 2 x half> undef, <vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef)
+; MAX256-NEXT: Cost Model: Found an estimated cost of 2 for instruction: call void @llvm.masked.scatter.nxv1f16.nxv1p0(<vscale x 1 x half> undef, <vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef)
+; MAX256-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; UNSUPPORTED-LABEL: 'masked_scatter_aligned_f16'
+; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv32f16.nxv32p0(<vscale x 32 x half> undef, <vscale x 32 x ptr> undef, i32 2, <vscale x 32 x i1> undef)
+; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv16f16.nxv16p0(<vscale x 16 x half> undef, <vscale x 16 x ptr> undef, i32 2, <vscale x 16 x i1> undef)
+; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv8f16.nxv8p0(<vscale x 8 x half> undef, <vscale x 8 x ptr> undef, i32 2, <vscale x 8 x i1> undef)
+; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4f16.nxv4p0(<vscale x 4 x half> undef, <vscale x 4 x ptr> undef, i32 2, <vscale x 4 x i1> undef)
+; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2f16.nxv2p0(<vscale x 2 x half> undef, <vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef)
+; UNSUPPORTED-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1f16.nxv1p0(<vscale x 1 x half> undef, <vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef)
+; UNSUPPORTED-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ call void @llvm.masked.scatter.nxv32f16.nxv32p0(<vscale x 32 x half> undef, <vscale x 32 x ptr> undef, i32 2, <vscale x 32 x i1> undef)
+ call void @llvm.masked.scatter.nxv16f16.nxv16p0(<vscale x 16 x half> undef, <vscale x 16 x ptr> undef, i32 2, <vscale x 16 x i1> undef)
+ call void @llvm.masked.scatter.nxv8f16.nxv8p0(<vscale x 8 x half> undef, <vscale x 8 x ptr> undef, i32 2, <vscale x 8 x i1> undef)
+ call void @llvm.masked.scatter.nxv4f16.nxv4p0(<vscale x 4 x half> undef, <vscale x 4 x ptr> undef, i32 2, <vscale x 4 x i1> undef)
+ call void @llvm.masked.scatter.nxv2f16.nxv2p0(<vscale x 2 x half> undef, <vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef)
+ call void @llvm.masked.scatter.nxv1f16.nxv1p0(<vscale x 1 x half> undef, <vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef)
+
+ ret void
+}
+
 define void @masked_scatter_unaligned() {
 ; CHECK-LABEL: 'masked_scatter_unaligned'
 ; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv8f64.nxv8p0(<vscale x 8 x double> undef, <vscale x 8 x ptr> undef, i32 2, <vscale x 8 x i1> undef)
@@ -200,12 +223,6 @@ define void @masked_scatter_unaligned() {
 ; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> undef, <vscale x 4 x ptr> undef, i32 2, <vscale x 4 x i1> undef)
 ; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2f32.nxv2p0(<vscale x 2 x float> undef, <vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef)
 ; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1f32.nxv1p0(<vscale x 1 x float> undef, <vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv32f16.nxv32p0(<vscale x 32 x half> undef, <vscale x 32 x ptr> undef, i32 1, <vscale x 32 x i1> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv16f16.nxv16p0(<vscale x 16 x half> undef, <vscale x 16 x ptr> undef, i32 1, <vscale x 16 x i1> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv8f16.nxv8p0(<vscale x 8 x half> undef, <vscale x 8 x ptr> undef, i32 1, <vscale x 8 x i1> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4f16.nxv4p0(<vscale x 4 x half> undef, <vscale x 4 x ptr> undef, i32 1, <vscale x 4 x i1> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2f16.nxv2p0(<vscale x 2 x half> undef, <vscale x 2 x ptr> undef, i32 1, <vscale x 2 x i1> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1f16.nxv1p0(<vscale x 1 x half> undef, <vscale x 1 x ptr> undef, i32 1, <vscale x 1 x i1> undef)
 ; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv8i64.nxv8p0(<vscale x 8 x i64> undef, <vscale x 8 x ptr> undef, i32 1, <vscale x 8 x i1> undef)
 ; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4i64.nxv4p0(<vscale x 4 x i64> undef, <vscale x 4 x ptr> undef, i32 1, <vscale x 4 x i1> undef)
 ; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> undef, <vscale x 2 x ptr> undef, i32 1, <vscale x 2 x i1> undef)
@@ -238,13 +255,6 @@ define void @masked_scatter_unaligned() {
  call void @llvm.masked.scatter.nxv2f32.nxv2p0(<vscale x 2 x float> undef, <vscale x 2 x ptr> undef, i32 2, <vscale x 2 x i1> undef)
  call void @llvm.masked.scatter.nxv1f32.nxv1p0(<vscale x 1 x float> undef, <vscale x 1 x ptr> undef, i32 2, <vscale x 1 x i1> undef)
- call void @llvm.masked.scatter.nxv32f16.nxv32p0(<vscale x 32 x half> undef, <vscale x 32 x ptr> undef, i32 1, <vscale x 32 x i1> undef)
- call void @llvm.masked.scatter.nxv16f16.nxv16p0(<vscale x 16 x half> undef, <vscale x 16 x ptr> undef, i32 1, <vscale x 16 x i1> undef)
- call void @llvm.masked.scatter.nxv8f16.nxv8p0(<vscale x 8 x half> undef, <vscale x 8 x ptr> undef, i32 1, <vscale x 8 x i1> undef)
- call void @llvm.masked.scatter.nxv4f16.nxv4p0(<vscale x 4 x half> undef, <vscale x 4 x ptr> undef, i32 1, <vscale x 4 x i1> undef)
- call void @llvm.masked.scatter.nxv2f16.nxv2p0(<vscale x 2 x half> undef, <vscale x 2 x ptr> undef, i32 1, <vscale x 2 x i1> undef)
- call void @llvm.masked.scatter.nxv1f16.nxv1p0(<vscale x 1 x half> undef, <vscale x 1 x ptr> undef, i32 1, <vscale x 1 x i1> undef)
-
  call void @llvm.masked.scatter.nxv8i64.nxv8p0(<vscale x 8 x i64> undef, <vscale x 8 x ptr> undef, i32 1, <vscale x 8 x i1> undef)
  call void @llvm.masked.scatter.nxv4i64.nxv4p0(<vscale x 4 x i64> undef, <vscale x 4 x ptr> undef, i32 1, <vscale x 4 x i1> undef)
  call void @llvm.masked.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> undef, <vscale x 2 x ptr> undef, i32 1, <vscale x 2 x i1> undef)
@@ -271,52 +281,22 @@ define void @masked_scatter_unaligned() {
  ret void
 }

-declare void @llvm.masked.scatter.nxv8f64.nxv8p0(<vscale x 8 x double>, <vscale x 8 x ptr>, i32, <vscale x 8 x i1>)
-declare void @llvm.masked.scatter.nxv4f64.nxv4p0(<vscale x 4 x double>,
<vscale x 4 x ptr>, i32, <vscale x 4 x i1>) -declare void @llvm.masked.scatter.nxv2f64.nxv2p0(<vscale x 2 x double>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>) -declare void @llvm.masked.scatter.nxv1f64.nxv1p0(<vscale x 1 x double>, <vscale x 1 x ptr>, i32, <vscale x 1 x i1>) - -declare void @llvm.masked.scatter.nxv16f32.nxv16p0(<vscale x 16 x float>, <vscale x 16 x ptr>, i32, <vscale x 16 x i1>) -declare void @llvm.masked.scatter.nxv8f32.nxv8p0(<vscale x 8 x float>, <vscale x 8 x ptr>, i32, <vscale x 8 x i1>) -declare void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>) -declare void @llvm.masked.scatter.nxv2f32.nxv2p0(<vscale x 2 x float>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>) -declare void @llvm.masked.scatter.nxv1f32.nxv1p0(<vscale x 1 x float>, <vscale x 1 x ptr>, i32, <vscale x 1 x i1>) - -declare void @llvm.masked.scatter.nxv32f16.nxv32p0(<vscale x 32 x half>, <vscale x 32 x ptr>, i32, <vscale x 32 x i1>) -declare void @llvm.masked.scatter.nxv16f16.nxv16p0(<vscale x 16 x half>, <vscale x 16 x ptr>, i32, <vscale x 16 x i1>) -declare void @llvm.masked.scatter.nxv8f16.nxv8p0(<vscale x 8 x half>, <vscale x 8 x ptr>, i32, <vscale x 8 x i1>) -declare void @llvm.masked.scatter.nxv4f16.nxv4p0(<vscale x 4 x half>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>) -declare void @llvm.masked.scatter.nxv2f16.nxv2p0(<vscale x 2 x half>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>) -declare void @llvm.masked.scatter.nxv1f16.nxv1p0(<vscale x 1 x half>, <vscale x 1 x ptr>, i32, <vscale x 1 x i1>) - -declare void @llvm.masked.scatter.nxv8i64.nxv8p0(<vscale x 8 x i64>, <vscale x 8 x ptr>, i32, <vscale x 8 x i1>) -declare void @llvm.masked.scatter.nxv4i64.nxv4p0(<vscale x 4 x i64>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>) -declare void @llvm.masked.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>) -declare void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64>, <vscale x 1 x ptr>, i32, <vscale x 1 x i1>) - -declare void @llvm.masked.scatter.nxv16i32.nxv16p0(<vscale x 16 x i32>, <vscale x 16 x ptr>, i32, <vscale x 16 x i1>) -declare void @llvm.masked.scatter.nxv8i32.nxv8p0(<vscale x 8 x i32>, <vscale x 8 x ptr>, i32, <vscale x 8 x i1>) -declare void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>) -declare void @llvm.masked.scatter.nxv2i32.nxv2p0(<vscale x 2 x i32>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>) -declare void @llvm.masked.scatter.nxv1i32.nxv1p0(<vscale x 1 x i32>, <vscale x 1 x ptr>, i32, <vscale x 1 x i1>) - -declare void @llvm.masked.scatter.nxv32i16.nxv32p0(<vscale x 32 x i16>, <vscale x 32 x ptr>, i32, <vscale x 32 x i1>) -declare void @llvm.masked.scatter.nxv16i16.nxv16p0(<vscale x 16 x i16>, <vscale x 16 x ptr>, i32, <vscale x 16 x i1>) -declare void @llvm.masked.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16>, <vscale x 8 x ptr>, i32, <vscale x 8 x i1>) -declare void @llvm.masked.scatter.nxv4i16.nxv4p0(<vscale x 4 x i16>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>) -declare void @llvm.masked.scatter.nxv2i16.nxv2p0(<vscale x 2 x i16>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>) -declare void @llvm.masked.scatter.nxv1i16.nxv1p0(<vscale x 1 x i16>, <vscale x 1 x ptr>, i32, <vscale x 1 x i1>) - -declare void @llvm.masked.scatter.nxv64i8.nxv64p0(<vscale x 64 x i8>, <vscale x 64 x ptr>, i32, <vscale x 64 x i1>) -declare void @llvm.masked.scatter.nxv32i8.nxv32p0(<vscale x 32 x i8>, <vscale x 32 x ptr>, i32, <vscale x 32 x i1>) -declare void 
@llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8>, <vscale x 16 x ptr>, i32, <vscale x 16 x i1>) -declare void @llvm.masked.scatter.nxv8i8.nxv8p0(<vscale x 8 x i8>, <vscale x 8 x ptr>, i32, <vscale x 8 x i1>) -declare void @llvm.masked.scatter.nxv4i8.nxv4p0(<vscale x 4 x i8>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>) -declare void @llvm.masked.scatter.nxv2i8.nxv2p0(<vscale x 2 x i8>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>) -declare void @llvm.masked.scatter.nxv1i8.nxv1p0(<vscale x 1 x i8>, <vscale x 1 x ptr>, i32, <vscale x 1 x i1>) - -declare void @llvm.masked.scatter.nxv8p0.nxv8p0(<vscale x 8 x ptr>, <vscale x 8 x ptr>, i32, <vscale x 8 x i1>) -declare void @llvm.masked.scatter.nxv4p0.nxv4p0(<vscale x 4 x ptr>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>) -declare void @llvm.masked.scatter.nxv2p0.nxv2p0(<vscale x 2 x ptr>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>) -declare void @llvm.masked.scatter.nxv1p0.nxv1p0(<vscale x 1 x ptr>, <vscale x 1 x ptr>, i32, <vscale x 1 x i1>) +define void @masked_scatter_unaligned_f16() { +; CHECK-LABEL: 'masked_scatter_unaligned_f16' +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv32f16.nxv32p0(<vscale x 32 x half> undef, <vscale x 32 x ptr> undef, i32 1, <vscale x 32 x i1> undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv16f16.nxv16p0(<vscale x 16 x half> undef, <vscale x 16 x ptr> undef, i32 1, <vscale x 16 x i1> undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv8f16.nxv8p0(<vscale x 8 x half> undef, <vscale x 8 x ptr> undef, i32 1, <vscale x 8 x i1> undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv4f16.nxv4p0(<vscale x 4 x half> undef, <vscale x 4 x ptr> undef, i32 1, <vscale x 4 x i1> undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv2f16.nxv2p0(<vscale x 2 x half> undef, <vscale x 2 x ptr> undef, i32 1, <vscale x 2 x i1> undef) +; CHECK-NEXT: Cost Model: Invalid cost for instruction: call void @llvm.masked.scatter.nxv1f16.nxv1p0(<vscale x 1 x half> undef, <vscale x 1 x ptr> undef, i32 1, <vscale x 1 x i1> undef) +; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void +; + call void @llvm.masked.scatter.nxv32f16.nxv32p0(<vscale x 32 x half> undef, <vscale x 32 x ptr> undef, i32 1, <vscale x 32 x i1> undef) + call void @llvm.masked.scatter.nxv16f16.nxv16p0(<vscale x 16 x half> undef, <vscale x 16 x ptr> undef, i32 1, <vscale x 16 x i1> undef) + call void @llvm.masked.scatter.nxv8f16.nxv8p0(<vscale x 8 x half> undef, <vscale x 8 x ptr> undef, i32 1, <vscale x 8 x i1> undef) + call void @llvm.masked.scatter.nxv4f16.nxv4p0(<vscale x 4 x half> undef, <vscale x 4 x ptr> undef, i32 1, <vscale x 4 x i1> undef) + call void @llvm.masked.scatter.nxv2f16.nxv2p0(<vscale x 2 x half> undef, <vscale x 2 x ptr> undef, i32 1, <vscale x 2 x i1> undef) + call void @llvm.masked.scatter.nxv1f16.nxv1p0(<vscale x 1 x half> undef, <vscale x 1 x ptr> undef, i32 1, <vscale x 1 x i1> undef) + ret void +} diff --git a/llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll b/llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll index 9b34320..acc340b 100644 --- a/llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll +++ b/llvm/test/Analysis/CostModel/RISCV/shuffle-broadcast.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py ; RUN: opt < %s 
-passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfh | FileCheck %s +; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfhmin | FileCheck %s ; RUN: opt < %s -passes="print<cost-model>" -cost-kind=code-size 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfh | FileCheck %s --check-prefix=SIZE +; RUN: opt < %s -passes="print<cost-model>" -cost-kind=code-size 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfhmin | FileCheck %s --check-prefix=SIZE define void @broadcast_scalable() { ; CHECK-LABEL: 'broadcast_scalable' diff --git a/llvm/test/Analysis/CostModel/RISCV/shuffle-permute.ll b/llvm/test/Analysis/CostModel/RISCV/shuffle-permute.ll index 5fc7545..105cc8a 100644 --- a/llvm/test/Analysis/CostModel/RISCV/shuffle-permute.ll +++ b/llvm/test/Analysis/CostModel/RISCV/shuffle-permute.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py ; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv32 -mattr=+v,+f,+d,+zfh,+zvfh | FileCheck %s +; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv32 -mattr=+v,+f,+d,+zfh,+zvfhmin | FileCheck %s ; RUN: opt < %s -passes="print<cost-model>" -cost-kind=code-size 2>&1 -disable-output -mtriple=riscv32 -mattr=+v,+f,+d,+zfh,+zvfh | FileCheck %s --check-prefix=SIZE +; RUN: opt < %s -passes="print<cost-model>" -cost-kind=code-size 2>&1 -disable-output -mtriple=riscv32 -mattr=+v,+f,+d,+zfh,+zvfhmin | FileCheck %s --check-prefix=SIZE ; Check that we don't crash querying costs when vectors are not enabled. ; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv32 diff --git a/llvm/test/Analysis/CostModel/RISCV/shuffle-reverse.ll b/llvm/test/Analysis/CostModel/RISCV/shuffle-reverse.ll index e80dbe3..3132178 100644 --- a/llvm/test/Analysis/CostModel/RISCV/shuffle-reverse.ll +++ b/llvm/test/Analysis/CostModel/RISCV/shuffle-reverse.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py ; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv32 -mattr=+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-min=-1 | FileCheck %s +; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv32 -mattr=+v,+f,+d,+zfh,+zvfhmin -riscv-v-vector-bits-min=-1 | FileCheck %s ; RUN: opt < %s -passes="print<cost-model>" -cost-kind=code-size 2>&1 -disable-output -mtriple=riscv32 -mattr=+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-min=-1 | FileCheck %s --check-prefixes=SIZE +; RUN: opt < %s -passes="print<cost-model>" -cost-kind=code-size 2>&1 -disable-output -mtriple=riscv32 -mattr=+v,+f,+d,+zfh,+zvfhmin -riscv-v-vector-bits-min=-1 | FileCheck %s --check-prefixes=SIZE ; Check that we don't crash querying costs when vectors are not enabled. 
; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv32 diff --git a/llvm/test/Analysis/CostModel/RISCV/shuffle-transpose.ll b/llvm/test/Analysis/CostModel/RISCV/shuffle-transpose.ll index c3f20c8..9e20171 100644 --- a/llvm/test/Analysis/CostModel/RISCV/shuffle-transpose.ll +++ b/llvm/test/Analysis/CostModel/RISCV/shuffle-transpose.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py ; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv32 -mattr=+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-min=-1 | FileCheck %s +; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv32 -mattr=+v,+f,+d,+zfh,+zvfhmin -riscv-v-vector-bits-min=-1 | FileCheck %s ; RUN: opt < %s -passes="print<cost-model>" -cost-kind=code-size 2>&1 -disable-output -mtriple=riscv32 -mattr=+v,+f,+d,+zfh,+zvfh -riscv-v-vector-bits-min=-1 | FileCheck %s --check-prefix=SIZE +; RUN: opt < %s -passes="print<cost-model>" -cost-kind=code-size 2>&1 -disable-output -mtriple=riscv32 -mattr=+v,+f,+d,+zfh,+zvfhmin -riscv-v-vector-bits-min=-1 | FileCheck %s --check-prefix=SIZE ; Check that we don't crash querying costs when vectors are not enabled. ; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv32 diff --git a/llvm/test/Analysis/CostModel/RISCV/splice.ll b/llvm/test/Analysis/CostModel/RISCV/splice.ll index 9acccef..8d7d157 100644 --- a/llvm/test/Analysis/CostModel/RISCV/splice.ll +++ b/llvm/test/Analysis/CostModel/RISCV/splice.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py ; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfh | FileCheck %s +; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfhmin | FileCheck %s ; RUN: opt < %s -passes="print<cost-model>" -cost-kind=code-size 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfh | FileCheck %s --check-prefix=SIZE +; RUN: opt < %s -passes="print<cost-model>" -cost-kind=code-size 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfhmin | FileCheck %s --check-prefix=SIZE define void @vector_splice() { ; CHECK-LABEL: 'vector_splice' @@ -165,59 +167,3 @@ define void @vector_splice() { ret void } - -declare <vscale x 1 x i8> @llvm.vector.splice.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, i32) -declare <vscale x 2 x i8> @llvm.vector.splice.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, i32) -declare <vscale x 4 x i8> @llvm.vector.splice.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, i32) -declare <vscale x 8 x i8> @llvm.vector.splice.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, i32) -declare <vscale x 16 x i8> @llvm.vector.splice.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, i32) -declare <vscale x 32 x i8> @llvm.vector.splice.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, i32) -declare <vscale x 64 x i8> @llvm.vector.splice.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, i32) - -declare <vscale x 1 x i16> @llvm.vector.splice.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, i32) -declare <vscale x 2 x i16> @llvm.vector.splice.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, i32) -declare <vscale x 4 x i16> @llvm.vector.splice.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, i32) -declare <vscale x 8 x i16> @llvm.vector.splice.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, i32) -declare <vscale x 16 x i16> @llvm.vector.splice.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x 
i16>, i32) -declare <vscale x 32 x i16> @llvm.vector.splice.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, i32) -declare <vscale x 64 x i16> @llvm.vector.splice.nxv64i16(<vscale x 64 x i16>, <vscale x 64 x i16>, i32) - -declare <vscale x 1 x i32> @llvm.vector.splice.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, i32) -declare <vscale x 2 x i32> @llvm.vector.splice.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i32) -declare <vscale x 4 x i32> @llvm.vector.splice.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, i32) -declare <vscale x 8 x i32> @llvm.vector.splice.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, i32) -declare <vscale x 16 x i32> @llvm.vector.splice.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, i32) -declare <vscale x 32 x i32> @llvm.vector.splice.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i32>, i32) -declare <vscale x 64 x i32> @llvm.vector.splice.nxv64i32(<vscale x 64 x i32>, <vscale x 64 x i32>, i32) - -declare <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i32) -declare <vscale x 2 x i64> @llvm.vector.splice.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, i32) -declare <vscale x 4 x i64> @llvm.vector.splice.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, i32) -declare <vscale x 8 x i64> @llvm.vector.splice.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, i32) -declare <vscale x 16 x i64> @llvm.vector.splice.nxv16i64(<vscale x 16 x i64>, <vscale x 16 x i64>, i32) -declare <vscale x 32 x i64> @llvm.vector.splice.nxv32i64(<vscale x 32 x i64>, <vscale x 32 x i64>, i32) -declare <vscale x 64 x i64> @llvm.vector.splice.nxv64i64(<vscale x 64 x i64>, <vscale x 64 x i64>, i32) - -declare <vscale x 1 x half> @llvm.vector.splice.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, i32) -declare <vscale x 2 x half> @llvm.vector.splice.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, i32) -declare <vscale x 4 x half> @llvm.vector.splice.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, i32) -declare <vscale x 8 x half> @llvm.vector.splice.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, i32) -declare <vscale x 16 x half> @llvm.vector.splice.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, i32) -declare <vscale x 32 x half> @llvm.vector.splice.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, i32) -declare <vscale x 64 x half> @llvm.vector.splice.nxv64f16(<vscale x 64 x half>, <vscale x 64 x half>, i32) - -declare <vscale x 1 x float> @llvm.vector.splice.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, i32) -declare <vscale x 2 x float> @llvm.vector.splice.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, i32) -declare <vscale x 4 x float> @llvm.vector.splice.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, i32) -declare <vscale x 8 x float> @llvm.vector.splice.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, i32) -declare <vscale x 16 x float> @llvm.vector.splice.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, i32) -declare <vscale x 32 x float> @llvm.vector.splice.nxv32f32(<vscale x 32 x float>, <vscale x 32 x float>, i32) -declare <vscale x 64 x float> @llvm.vector.splice.nxv64f32(<vscale x 64 x float>, <vscale x 64 x float>, i32) - -declare <vscale x 1 x double> @llvm.vector.splice.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, i32) -declare <vscale x 2 x double> @llvm.vector.splice.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, i32) -declare <vscale x 4 x double> @llvm.vector.splice.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, i32) -declare <vscale x 8 x double> 
@llvm.vector.splice.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, i32) -declare <vscale x 16 x double> @llvm.vector.splice.nxv16f64(<vscale x 16 x double>, <vscale x 16 x double>, i32) -declare <vscale x 32 x double> @llvm.vector.splice.nxv32f64(<vscale x 32 x double>, <vscale x 32 x double>, i32) -declare <vscale x 64 x double> @llvm.vector.splice.nxv64f64(<vscale x 64 x double>, <vscale x 64 x double>, i32) diff --git a/llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-guard-info.ll b/llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-guard-info.ll index 413bd21..37d6584 100644 --- a/llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-guard-info.ll +++ b/llvm/test/Analysis/ScalarEvolution/max-backedge-taken-count-guard-info.ll @@ -1595,6 +1595,145 @@ exit: ret i32 0 } +define void @ptr_induction_eq_1(ptr %a, ptr %b) { +; CHECK-LABEL: 'ptr_induction_eq_1' +; CHECK-NEXT: Classifying expressions for: @ptr_induction_eq_1 +; CHECK-NEXT: %ptr.iv = phi ptr [ %ptr.iv.next, %loop ], [ %a, %entry ] +; CHECK-NEXT: --> {%a,+,8}<nuw><%loop> U: full-set S: full-set Exits: ((8 * ((-8 + (-1 * (ptrtoint ptr %a to i64)) + (ptrtoint ptr %b to i64)) /u 8))<nuw> + %a) LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %ptr.iv.next = getelementptr inbounds i8, ptr %ptr.iv, i64 8 +; CHECK-NEXT: --> {(8 + %a),+,8}<nuw><%loop> U: full-set S: full-set Exits: (8 + (8 * ((-8 + (-1 * (ptrtoint ptr %a to i64)) + (ptrtoint ptr %b to i64)) /u 8))<nuw> + %a) LoopDispositions: { %loop: Computable } +; CHECK-NEXT: Determining loop execution counts for: @ptr_induction_eq_1 +; CHECK-NEXT: Loop %loop: backedge-taken count is ((-8 + (-1 * (ptrtoint ptr %a to i64)) + (ptrtoint ptr %b to i64)) /u 8) +; CHECK-NEXT: Loop %loop: constant max backedge-taken count is i64 2305843009213693951 +; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is ((-8 + (-1 * (ptrtoint ptr %a to i64)) + (ptrtoint ptr %b to i64)) /u 8) +; CHECK-NEXT: Loop %loop: Trip multiple is 1 +; +entry: + %cmp = icmp eq ptr %a, %b + br i1 %cmp, label %exit, label %loop + +loop: + %ptr.iv = phi ptr [ %ptr.iv.next, %loop ], [ %a, %entry ] + %ptr.iv.next = getelementptr inbounds i8, ptr %ptr.iv, i64 8 + %exitcond = icmp eq ptr %ptr.iv.next, %b + br i1 %exitcond, label %exit, label %loop + +exit: + ret void +} + +define void @ptr_induction_eq_2(ptr %a, i64 %n) { +; CHECK-LABEL: 'ptr_induction_eq_2' +; CHECK-NEXT: Classifying expressions for: @ptr_induction_eq_2 +; CHECK-NEXT: %b = getelementptr inbounds ptr, ptr %a, i64 %n +; CHECK-NEXT: --> ((8 * %n)<nsw> + %a) U: full-set S: full-set +; CHECK-NEXT: %ptr.iv = phi ptr [ %ptr.iv.next, %loop ], [ %a, %entry ] +; CHECK-NEXT: --> {%a,+,8}<nuw><%loop> U: full-set S: full-set Exits: ((8 * ((-8 + (8 * %n)<nsw>) /u 8))<nuw> + %a) LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %ptr.iv.next = getelementptr inbounds i8, ptr %ptr.iv, i64 8 +; CHECK-NEXT: --> {(8 + %a),+,8}<nuw><%loop> U: full-set S: full-set Exits: (8 + (8 * ((-8 + (8 * %n)<nsw>) /u 8))<nuw> + %a) LoopDispositions: { %loop: Computable } +; CHECK-NEXT: Determining loop execution counts for: @ptr_induction_eq_2 +; CHECK-NEXT: Loop %loop: backedge-taken count is ((-8 + (8 * %n)<nsw>) /u 8) +; CHECK-NEXT: Loop %loop: constant max backedge-taken count is i64 2305843009213693951 +; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is ((-8 + (8 * %n)<nsw>) /u 8) +; CHECK-NEXT: Loop %loop: Trip multiple is 1 +; +entry: + %b = getelementptr inbounds ptr, ptr %a, i64 %n + %cmp = icmp eq ptr %a, %b + br i1 
%cmp, label %exit, label %loop + +loop: + %ptr.iv = phi ptr [ %ptr.iv.next, %loop ], [ %a, %entry ] + %ptr.iv.next = getelementptr inbounds i8, ptr %ptr.iv, i64 8 + %exitcond = icmp eq ptr %ptr.iv.next, %b + br i1 %exitcond, label %exit, label %loop + +exit: + ret void +} + +; TODO: It feels like we should be able to calculate the symbolic max +; exit count for the loop.inc block here, in the same way as +; ptr_induction_eq_1. The problem seems to be in howFarToZero when the +; ControlsOnlyExit is set to false. +define void @ptr_induction_early_exit_eq_1(ptr %a, ptr %b, ptr %c) { +; CHECK-LABEL: 'ptr_induction_early_exit_eq_1' +; CHECK-NEXT: Classifying expressions for: @ptr_induction_early_exit_eq_1 +; CHECK-NEXT: %ptr.iv = phi ptr [ %ptr.iv.next, %loop.inc ], [ %a, %entry ] +; CHECK-NEXT: --> {%a,+,8}<nuw><%loop> U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %ld1 = load ptr, ptr %ptr.iv, align 8 +; CHECK-NEXT: --> %ld1 U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Variant } +; CHECK-NEXT: %ptr.iv.next = getelementptr inbounds i8, ptr %ptr.iv, i64 8 +; CHECK-NEXT: --> {(8 + %a),+,8}<nw><%loop> U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: Determining loop execution counts for: @ptr_induction_early_exit_eq_1 +; CHECK-NEXT: Loop %loop: <multiple exits> Unpredictable backedge-taken count. +; CHECK-NEXT: exit count for loop: ***COULDNOTCOMPUTE*** +; CHECK-NEXT: exit count for loop.inc: ***COULDNOTCOMPUTE*** +; CHECK-NEXT: Loop %loop: Unpredictable constant max backedge-taken count. +; CHECK-NEXT: Loop %loop: Unpredictable symbolic max backedge-taken count. +; CHECK-NEXT: symbolic max exit count for loop: ***COULDNOTCOMPUTE*** +; CHECK-NEXT: symbolic max exit count for loop.inc: ***COULDNOTCOMPUTE*** +; +entry: + %cmp = icmp eq ptr %a, %b + br i1 %cmp, label %exit, label %loop + +loop: + %ptr.iv = phi ptr [ %ptr.iv.next, %loop.inc ], [ %a, %entry ] + %ld1 = load ptr, ptr %ptr.iv, align 8 + %earlyexitcond = icmp eq ptr %ld1, %c + br i1 %earlyexitcond, label %exit, label %loop.inc + +loop.inc: + %ptr.iv.next = getelementptr inbounds i8, ptr %ptr.iv, i64 8 + %exitcond = icmp eq ptr %ptr.iv.next, %b + br i1 %exitcond, label %exit, label %loop + +exit: + ret void +} + +define void @ptr_induction_early_exit_eq_2(ptr %a, i64 %n, ptr %c) { +; CHECK-LABEL: 'ptr_induction_early_exit_eq_2' +; CHECK-NEXT: Classifying expressions for: @ptr_induction_early_exit_eq_2 +; CHECK-NEXT: %b = getelementptr inbounds ptr, ptr %a, i64 %n +; CHECK-NEXT: --> ((8 * %n)<nsw> + %a) U: full-set S: full-set +; CHECK-NEXT: %ptr.iv = phi ptr [ %ptr.iv.next, %loop.inc ], [ %a, %entry ] +; CHECK-NEXT: --> {%a,+,8}<nuw><%loop> U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: %ld1 = load ptr, ptr %ptr.iv, align 8 +; CHECK-NEXT: --> %ld1 U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Variant } +; CHECK-NEXT: %ptr.iv.next = getelementptr inbounds i8, ptr %ptr.iv, i64 8 +; CHECK-NEXT: --> {(8 + %a),+,8}<nw><%loop> U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Computable } +; CHECK-NEXT: Determining loop execution counts for: @ptr_induction_early_exit_eq_2 +; CHECK-NEXT: Loop %loop: <multiple exits> Unpredictable backedge-taken count. 
+; CHECK-NEXT: exit count for loop: ***COULDNOTCOMPUTE*** +; CHECK-NEXT: exit count for loop.inc: ((-8 + (8 * %n)<nsw>) /u 8) +; CHECK-NEXT: Loop %loop: constant max backedge-taken count is i64 2305843009213693951 +; CHECK-NEXT: Loop %loop: symbolic max backedge-taken count is ((-8 + (8 * %n)<nsw>) /u 8) +; CHECK-NEXT: symbolic max exit count for loop: ***COULDNOTCOMPUTE*** +; CHECK-NEXT: symbolic max exit count for loop.inc: ((-8 + (8 * %n)<nsw>) /u 8) +; +entry: + %b = getelementptr inbounds ptr, ptr %a, i64 %n + %cmp = icmp eq ptr %a, %b + br i1 %cmp, label %exit, label %loop + +loop: + %ptr.iv = phi ptr [ %ptr.iv.next, %loop.inc ], [ %a, %entry ] + %ld1 = load ptr, ptr %ptr.iv, align 8 + %earlyexitcond = icmp eq ptr %ld1, %c + br i1 %earlyexitcond, label %exit, label %loop.inc + +loop.inc: + %ptr.iv.next = getelementptr inbounds i8, ptr %ptr.iv, i64 8 + %exitcond = icmp eq ptr %ptr.iv.next, %b + br i1 %exitcond, label %exit, label %loop + +exit: + ret void +} + + define void @gep_addrec_nw(ptr %a) { ; CHECK-LABEL: 'gep_addrec_nw' ; CHECK-NEXT: Classifying expressions for: @gep_addrec_nw diff --git a/llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll b/llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll index 99f2ff0..1a60f87 100644 --- a/llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll +++ b/llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll @@ -1,11 +1,14 @@ -; RUN: llc < %s -mtriple=arm64-linux-gnu | FileCheck %s -; RUN: llc < %s -global-isel -global-isel-abort=1 -pass-remarks-missed=gisel* -mtriple=arm64-linux-gnu 2>&1 | FileCheck %s --check-prefixes=GISEL,FALLBACK +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -mtriple=arm64-linux-gnu | FileCheck %s --check-prefixes=CHECK,SDAG +; RUN: llc < %s -global-isel -global-isel-abort=1 -pass-remarks-missed=gisel* -mtriple=arm64-linux-gnu 2>&1 | FileCheck %s --check-prefixes=CHECK,GISEL,FALLBACK %0 = type { i64, i64 } define dso_local i128 @f0(ptr %p) nounwind readonly { ; CHECK-LABEL: f0: -; CHECK: ldxp {{x[0-9]+}}, {{x[0-9]+}}, [x0] +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldxp x0, x1, [x0] +; CHECK-NEXT: ret entry: %ldrexd = tail call %0 @llvm.aarch64.ldxp(ptr %p) %0 = extractvalue %0 %ldrexd, 1 @@ -19,7 +22,10 @@ entry: define dso_local i32 @f1(ptr %ptr, i128 %val) nounwind { ; CHECK-LABEL: f1: -; CHECK: stxp {{w[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, [x0] +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: stxp w8, x2, x3, [x0] +; CHECK-NEXT: mov w0, w8 +; CHECK-NEXT: ret entry: %tmp4 = trunc i128 %val to i64 %tmp6 = lshr i128 %val, 64 @@ -35,16 +41,21 @@ declare i32 @llvm.aarch64.stxp(i64, i64, ptr) nounwind ; FALLBACK-NOT: remark:{{.*}}test_load_i8 define dso_local void @test_load_i8(ptr %addr) { -; CHECK-LABEL: test_load_i8: -; CHECK: ldxrb w[[LOADVAL:[0-9]+]], [x0] -; CHECK-NOT: uxtb -; CHECK-NOT: and -; CHECK: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var] - +; SDAG-LABEL: test_load_i8: +; SDAG: // %bb.0: +; SDAG-NEXT: ldxrb w8, [x0] +; SDAG-NEXT: adrp x9, var +; SDAG-NEXT: str x8, [x9, :lo12:var] +; SDAG-NEXT: ret +; ; GISEL-LABEL: test_load_i8: -; GISEL: ldxrb w[[LOADVAL:[0-9]+]], [x0] -; GISEL-NOT: uxtb -; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var] +; GISEL: // %bb.0: +; GISEL-NEXT: ldxrb w9, [x0] +; GISEL-NEXT: adrp x8, var +; GISEL-NEXT: and x9, x9, #0xff +; GISEL-NEXT: str x9, [x8, :lo12:var] +; GISEL-NEXT: ret + %val = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i8) %addr) %shortval = trunc i64 %val to i8 %extval = zext i8 %shortval to i64 @@ -54,16 +65,21 @@ define dso_local 
void @test_load_i8(ptr %addr) { ; FALLBACK-NOT: remark:{{.*}}test_load_i16 define dso_local void @test_load_i16(ptr %addr) { -; CHECK-LABEL: test_load_i16: -; CHECK: ldxrh w[[LOADVAL:[0-9]+]], [x0] -; CHECK-NOT: uxth -; CHECK-NOT: and -; CHECK: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var] - +; SDAG-LABEL: test_load_i16: +; SDAG: // %bb.0: +; SDAG-NEXT: ldxrh w8, [x0] +; SDAG-NEXT: adrp x9, var +; SDAG-NEXT: str x8, [x9, :lo12:var] +; SDAG-NEXT: ret +; ; GISEL-LABEL: test_load_i16: -; GISEL: ldxrh w[[LOADVAL:[0-9]+]], [x0] -; GISEL-NOT: uxtb -; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var] +; GISEL: // %bb.0: +; GISEL-NEXT: ldxrh w9, [x0] +; GISEL-NEXT: adrp x8, var +; GISEL-NEXT: and x9, x9, #0xffff +; GISEL-NEXT: str x9, [x8, :lo12:var] +; GISEL-NEXT: ret + %val = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i16) %addr) %shortval = trunc i64 %val to i16 %extval = zext i16 %shortval to i64 @@ -73,16 +89,21 @@ define dso_local void @test_load_i16(ptr %addr) { ; FALLBACK-NOT: remark:{{.*}}test_load_i32 define dso_local void @test_load_i32(ptr %addr) { -; CHECK-LABEL: test_load_i32: -; CHECK: ldxr w[[LOADVAL:[0-9]+]], [x0] -; CHECK-NOT: uxtw -; CHECK-NOT: and -; CHECK: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var] - +; SDAG-LABEL: test_load_i32: +; SDAG: // %bb.0: +; SDAG-NEXT: ldxr w8, [x0] +; SDAG-NEXT: adrp x9, var +; SDAG-NEXT: str x8, [x9, :lo12:var] +; SDAG-NEXT: ret +; ; GISEL-LABEL: test_load_i32: -; GISEL: ldxr w[[LOADVAL:[0-9]+]], [x0] -; GISEL-NOT: uxtb -; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var] +; GISEL: // %bb.0: +; GISEL-NEXT: ldxr w9, [x0] +; GISEL-NEXT: adrp x8, var +; GISEL-NEXT: mov w9, w9 +; GISEL-NEXT: str x9, [x8, :lo12:var] +; GISEL-NEXT: ret + %val = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i32) %addr) %shortval = trunc i64 %val to i32 %extval = zext i32 %shortval to i64 @@ -93,13 +114,12 @@ define dso_local void @test_load_i32(ptr %addr) { ; FALLBACK-NOT: remark:{{.*}}test_load_i64 define dso_local void @test_load_i64(ptr %addr) { ; CHECK-LABEL: test_load_i64: -; CHECK: ldxr x[[LOADVAL:[0-9]+]], [x0] -; CHECK: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var] +; CHECK: // %bb.0: +; CHECK-NEXT: ldxr x8, [x0] +; CHECK-NEXT: adrp x9, var +; CHECK-NEXT: str x8, [x9, :lo12:var] +; CHECK-NEXT: ret -; GISEL-LABEL: test_load_i64: -; GISEL: ldxr x[[LOADVAL:[0-9]+]], [x0] -; GISEL-NOT: uxtb -; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var] %val = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i64) %addr) store i64 %val, ptr @var, align 8 ret void @@ -111,13 +131,10 @@ declare i64 @llvm.aarch64.ldxr.p0(ptr) nounwind ; FALLBACK-NOT: remark:{{.*}}test_store_i8 define dso_local i32 @test_store_i8(i32, i8 %val, ptr %addr) { ; CHECK-LABEL: test_store_i8: -; CHECK-NOT: uxtb -; CHECK-NOT: and -; CHECK: stxrb w0, w1, [x2] -; GISEL-LABEL: test_store_i8: -; GISEL-NOT: uxtb -; GISEL-NOT: and -; GISEL: stxrb w0, w1, [x2] +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 +; CHECK-NEXT: stxrb w0, w1, [x2] +; CHECK-NEXT: ret %extval = zext i8 %val to i64 %res = call i32 @llvm.aarch64.stxr.p0(i64 %extval, ptr elementtype(i8) %addr) ret i32 %res @@ -126,13 +143,10 @@ define dso_local i32 @test_store_i8(i32, i8 %val, ptr %addr) { ; FALLBACK-NOT: remark:{{.*}}test_store_i16 define dso_local i32 @test_store_i16(i32, i16 %val, ptr %addr) { ; CHECK-LABEL: test_store_i16: -; CHECK-NOT: uxth -; CHECK-NOT: and -; CHECK: stxrh w0, w1, [x2] -; GISEL-LABEL: test_store_i16: -; GISEL-NOT: uxth -; GISEL-NOT: and -; GISEL: stxrh w0, w1, [x2] +; CHECK: // %bb.0: +; CHECK-NEXT: 
// kill: def $w1 killed $w1 def $x1 +; CHECK-NEXT: stxrh w0, w1, [x2] +; CHECK-NEXT: ret %extval = zext i16 %val to i64 %res = call i32 @llvm.aarch64.stxr.p0(i64 %extval, ptr elementtype(i16) %addr) ret i32 %res @@ -141,13 +155,9 @@ define dso_local i32 @test_store_i16(i32, i16 %val, ptr %addr) { ; FALLBACK-NOT: remark:{{.*}}test_store_i32 define dso_local i32 @test_store_i32(i32, i32 %val, ptr %addr) { ; CHECK-LABEL: test_store_i32: -; CHECK-NOT: uxtw -; CHECK-NOT: and -; CHECK: stxr w0, w1, [x2] -; GISEL-LABEL: test_store_i32: -; GISEL-NOT: uxtw -; GISEL-NOT: and -; GISEL: stxr w0, w1, [x2] +; CHECK: // %bb.0: +; CHECK-NEXT: stxr w0, w1, [x2] +; CHECK-NEXT: ret %extval = zext i32 %val to i64 %res = call i32 @llvm.aarch64.stxr.p0(i64 %extval, ptr elementtype(i32) %addr) ret i32 %res @@ -156,18 +166,20 @@ define dso_local i32 @test_store_i32(i32, i32 %val, ptr %addr) { ; FALLBACK-NOT: remark:{{.*}}test_store_i64 define dso_local i32 @test_store_i64(i32, i64 %val, ptr %addr) { ; CHECK-LABEL: test_store_i64: -; CHECK: stxr w0, x1, [x2] -; GISEL-LABEL: test_store_i64: -; GISEL: stxr w0, x1, [x2] +; CHECK: // %bb.0: +; CHECK-NEXT: stxr w0, x1, [x2] +; CHECK-NEXT: ret %res = call i32 @llvm.aarch64.stxr.p0(i64 %val, ptr elementtype(i64) %addr) ret i32 %res } declare i32 @llvm.aarch64.stxr.p0(i64, ptr) nounwind -; CHECK: test_clear: -; CHECK: clrex define dso_local void @test_clear() { +; CHECK-LABEL: test_clear: +; CHECK: // %bb.0: +; CHECK-NEXT: clrex +; CHECK-NEXT: ret call void @llvm.aarch64.clrex() ret void } @@ -176,7 +188,9 @@ declare void @llvm.aarch64.clrex() nounwind define dso_local i128 @test_load_acquire_i128(ptr %p) nounwind readonly { ; CHECK-LABEL: test_load_acquire_i128: -; CHECK: ldaxp {{x[0-9]+}}, {{x[0-9]+}}, [x0] +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: ldaxp x0, x1, [x0] +; CHECK-NEXT: ret entry: %ldrexd = tail call %0 @llvm.aarch64.ldaxp(ptr %p) %0 = extractvalue %0 %ldrexd, 1 @@ -190,7 +204,10 @@ entry: define dso_local i32 @test_store_release_i128(ptr %ptr, i128 %val) nounwind { ; CHECK-LABEL: test_store_release_i128: -; CHECK: stlxp {{w[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, [x0] +; CHECK: // %bb.0: // %entry +; CHECK-NEXT: stlxp w8, x2, x3, [x0] +; CHECK-NEXT: mov w0, w8 +; CHECK-NEXT: ret entry: %tmp4 = trunc i128 %val to i64 %tmp6 = lshr i128 %val, 64 @@ -204,15 +221,21 @@ declare i32 @llvm.aarch64.stlxp(i64, i64, ptr) nounwind ; FALLBACK-NOT: remark:{{.*}}test_load_acquire_i8 define dso_local void @test_load_acquire_i8(ptr %addr) { -; CHECK-LABEL: test_load_acquire_i8: -; CHECK: ldaxrb w[[LOADVAL:[0-9]+]], [x0] -; CHECK-NOT: uxtb -; CHECK-NOT: and -; CHECK: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var] - +; SDAG-LABEL: test_load_acquire_i8: +; SDAG: // %bb.0: +; SDAG-NEXT: ldaxrb w8, [x0] +; SDAG-NEXT: adrp x9, var +; SDAG-NEXT: str x8, [x9, :lo12:var] +; SDAG-NEXT: ret +; ; GISEL-LABEL: test_load_acquire_i8: -; GISEL: ldaxrb w[[LOADVAL:[0-9]+]], [x0] -; GISEL-DAG: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var] +; GISEL: // %bb.0: +; GISEL-NEXT: ldaxrb w9, [x0] +; GISEL-NEXT: adrp x8, var +; GISEL-NEXT: and x9, x9, #0xff +; GISEL-NEXT: str x9, [x8, :lo12:var] +; GISEL-NEXT: ret + %val = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i8) %addr) %shortval = trunc i64 %val to i8 %extval = zext i8 %shortval to i64 @@ -222,15 +245,21 @@ define dso_local void @test_load_acquire_i8(ptr %addr) { ; FALLBACK-NOT: remark:{{.*}}test_load_acquire_i16 define dso_local void @test_load_acquire_i16(ptr %addr) { -; CHECK-LABEL: test_load_acquire_i16: -; CHECK: ldaxrh w[[LOADVAL:[0-9]+]], 
[x0] -; CHECK-NOT: uxth -; CHECK-NOT: and -; CHECK: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var] - +; SDAG-LABEL: test_load_acquire_i16: +; SDAG: // %bb.0: +; SDAG-NEXT: ldaxrh w8, [x0] +; SDAG-NEXT: adrp x9, var +; SDAG-NEXT: str x8, [x9, :lo12:var] +; SDAG-NEXT: ret +; ; GISEL-LABEL: test_load_acquire_i16: -; GISEL: ldaxrh w[[LOADVAL:[0-9]+]], [x0] -; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var] +; GISEL: // %bb.0: +; GISEL-NEXT: ldaxrh w9, [x0] +; GISEL-NEXT: adrp x8, var +; GISEL-NEXT: and x9, x9, #0xffff +; GISEL-NEXT: str x9, [x8, :lo12:var] +; GISEL-NEXT: ret + %val = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i16) %addr) %shortval = trunc i64 %val to i16 %extval = zext i16 %shortval to i64 @@ -240,15 +269,21 @@ define dso_local void @test_load_acquire_i16(ptr %addr) { ; FALLBACK-NOT: remark:{{.*}}test_load_acquire_i32 define dso_local void @test_load_acquire_i32(ptr %addr) { -; CHECK-LABEL: test_load_acquire_i32: -; CHECK: ldaxr w[[LOADVAL:[0-9]+]], [x0] -; CHECK-NOT: uxtw -; CHECK-NOT: and -; CHECK: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var] - +; SDAG-LABEL: test_load_acquire_i32: +; SDAG: // %bb.0: +; SDAG-NEXT: ldaxr w8, [x0] +; SDAG-NEXT: adrp x9, var +; SDAG-NEXT: str x8, [x9, :lo12:var] +; SDAG-NEXT: ret +; ; GISEL-LABEL: test_load_acquire_i32: -; GISEL: ldaxr w[[LOADVAL:[0-9]+]], [x0] -; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var] +; GISEL: // %bb.0: +; GISEL-NEXT: ldaxr w9, [x0] +; GISEL-NEXT: adrp x8, var +; GISEL-NEXT: mov w9, w9 +; GISEL-NEXT: str x9, [x8, :lo12:var] +; GISEL-NEXT: ret + %val = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i32) %addr) %shortval = trunc i64 %val to i32 %extval = zext i32 %shortval to i64 @@ -259,12 +294,12 @@ define dso_local void @test_load_acquire_i32(ptr %addr) { ; FALLBACK-NOT: remark:{{.*}}test_load_acquire_i64 define dso_local void @test_load_acquire_i64(ptr %addr) { ; CHECK-LABEL: test_load_acquire_i64: -; CHECK: ldaxr x[[LOADVAL:[0-9]+]], [x0] -; CHECK: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var] +; CHECK: // %bb.0: +; CHECK-NEXT: ldaxr x8, [x0] +; CHECK-NEXT: adrp x9, var +; CHECK-NEXT: str x8, [x9, :lo12:var] +; CHECK-NEXT: ret -; GISEL-LABEL: test_load_acquire_i64: -; GISEL: ldaxr x[[LOADVAL:[0-9]+]], [x0] -; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var] %val = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i64) %addr) store i64 %val, ptr @var, align 8 ret void @@ -276,13 +311,10 @@ declare i64 @llvm.aarch64.ldaxr.p0(ptr) nounwind ; FALLBACK-NOT: remark:{{.*}}test_store_release_i8 define dso_local i32 @test_store_release_i8(i32, i8 %val, ptr %addr) { ; CHECK-LABEL: test_store_release_i8: -; CHECK-NOT: uxtb -; CHECK-NOT: and -; CHECK: stlxrb w0, w1, [x2] -; GISEL-LABEL: test_store_release_i8: -; GISEL-NOT: uxtb -; GISEL-NOT: and -; GISEL: stlxrb w0, w1, [x2] +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 +; CHECK-NEXT: stlxrb w0, w1, [x2] +; CHECK-NEXT: ret %extval = zext i8 %val to i64 %res = call i32 @llvm.aarch64.stlxr.p0(i64 %extval, ptr elementtype(i8) %addr) ret i32 %res @@ -291,13 +323,10 @@ define dso_local i32 @test_store_release_i8(i32, i8 %val, ptr %addr) { ; FALLBACK-NOT: remark:{{.*}}test_store_release_i16 define dso_local i32 @test_store_release_i16(i32, i16 %val, ptr %addr) { ; CHECK-LABEL: test_store_release_i16: -; CHECK-NOT: uxth -; CHECK-NOT: and -; CHECK: stlxrh w0, w1, [x2] -; GISEL-LABEL: test_store_release_i16: -; GISEL-NOT: uxth -; GISEL-NOT: and -; GISEL: stlxrh w0, w1, [x2] +; CHECK: // %bb.0: +; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 +; CHECK-NEXT: 
stlxrh w0, w1, [x2] +; CHECK-NEXT: ret %extval = zext i16 %val to i64 %res = call i32 @llvm.aarch64.stlxr.p0(i64 %extval, ptr elementtype(i16) %addr) ret i32 %res @@ -306,13 +335,9 @@ define dso_local i32 @test_store_release_i16(i32, i16 %val, ptr %addr) { ; FALLBACK-NOT: remark:{{.*}}test_store_release_i32 define dso_local i32 @test_store_release_i32(i32, i32 %val, ptr %addr) { ; CHECK-LABEL: test_store_release_i32: -; CHECK-NOT: uxtw -; CHECK-NOT: and -; CHECK: stlxr w0, w1, [x2] -; GISEL-LABEL: test_store_release_i32: -; GISEL-NOT: uxtw -; GISEL-NOT: and -; GISEL: stlxr w0, w1, [x2] +; CHECK: // %bb.0: +; CHECK-NEXT: stlxr w0, w1, [x2] +; CHECK-NEXT: ret %extval = zext i32 %val to i64 %res = call i32 @llvm.aarch64.stlxr.p0(i64 %extval, ptr elementtype(i32) %addr) ret i32 %res @@ -321,11 +346,25 @@ define dso_local i32 @test_store_release_i32(i32, i32 %val, ptr %addr) { ; FALLBACK-NOT: remark:{{.*}}test_store_release_i64 define dso_local i32 @test_store_release_i64(i32, i64 %val, ptr %addr) { ; CHECK-LABEL: test_store_release_i64: -; CHECK: stlxr w0, x1, [x2] -; GISEL-LABEL: test_store_release_i64: -; GISEL: stlxr w0, x1, [x2] +; CHECK: // %bb.0: +; CHECK-NEXT: stlxr w0, x1, [x2] +; CHECK-NEXT: ret %res = call i32 @llvm.aarch64.stlxr.p0(i64 %val, ptr elementtype(i64) %addr) ret i32 %res } +; The stxp result cannot be allocated to the same register as the inputs. +; FIXME: This is a miscompile. +define dso_local i32 @test_stxp_undef(ptr %p, i64 %x) nounwind { +; CHECK-LABEL: test_stxp_undef: +; CHECK: // %bb.0: +; CHECK-NEXT: stxp w8, x8, x1, [x0] +; CHECK-NEXT: mov w0, w8 +; CHECK-NEXT: ret + %res = call i32 @llvm.aarch64.stxp(i64 undef, i64 %x, ptr %p) + ret i32 %res +} + declare i32 @llvm.aarch64.stlxr.p0(i64, ptr) nounwind +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; FALLBACK: {{.*}} diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll b/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll new file mode 100644 index 0000000..668ebe3 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll @@ -0,0 +1,60 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck %s -check-prefix=GFX11 +; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck %s -check-prefix=GFX12 + +define amdgpu_cs void @test_uniform_load_b96(ptr addrspace(1) %ptr, i32 %arg) "amdgpu-flat-work-group-size"="1,1" { +; GFX11-LABEL: test_uniform_load_b96: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: v_mov_b32_e32 v3, 0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-NEXT: v_lshlrev_b64 v[2:3], 2, v[2:3] +; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo +; GFX11-NEXT: v_readfirstlane_b32 s0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_readfirstlane_b32 s1, v3 +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b64 s[2:3], s[0:1], 0x0 +; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x8 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v2, s3 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_or3_b32 v2, s2, v2, s0 +; GFX11-NEXT: global_store_b32 v[0:1], v2, off +; GFX11-NEXT: s_nop 0 +; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: test_uniform_load_b96: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: v_mov_b32_e32 v3, 0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-NEXT: v_lshlrev_b64_e32 v[2:3], 2, v[2:3] +; GFX12-NEXT: v_add_co_u32 v2, vcc_lo, v0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX12-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo +; GFX12-NEXT: v_readfirstlane_b32 s0, v2 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2) +; GFX12-NEXT: v_readfirstlane_b32 s1, v3 +; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v2, s0 +; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX12-NEXT: v_or3_b32 v2, v2, s1, s2 +; GFX12-NEXT: global_store_b32 v[0:1], v2, off +; GFX12-NEXT: s_nop 0 +; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX12-NEXT: s_endpgm +bb: + %i = zext i32 %arg to i64 + %i1 = getelementptr i32, ptr addrspace(1) %ptr, i64 %i + %i2 = load <3 x i32>, ptr addrspace(1) %i1, align 4 + %i3 = extractelement <3 x i32> %i2, i32 0 + %i4 = extractelement <3 x i32> %i2, i32 1 + %i5 = extractelement <3 x i32> %i2, i32 2 + %i6 = or i32 %i3, %i4 + %i7 = or i32 %i5, %i6 + store i32 %i7, ptr addrspace(1) %ptr, align 4 + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/phi-vgpr-input-moveimm.mir b/llvm/test/CodeGen/AMDGPU/phi-vgpr-input-moveimm.mir index f931acb..dab4c9d 100644 --- a/llvm/test/CodeGen/AMDGPU/phi-vgpr-input-moveimm.mir +++ b/llvm/test/CodeGen/AMDGPU/phi-vgpr-input-moveimm.mir @@ -1,12 +1,36 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5 # RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs -run-pass=si-fix-sgpr-copies -o - %s | FileCheck -check-prefix=GCN %s --- -# 
GCN-LABEL: name: phi_moveimm_input -# GCN-NOT: %{{[0-9]+}}:vgpr_32 = PHI %{{[0-9]+}}, %bb.3, %{{[0-9]+}}, %bb.1 -# GCN: %{{[0-9]+}}:sreg_32 = PHI %{{[0-9]+}}, %bb.3, %{{[0-9]+}}, %bb.1 name: phi_moveimm_input tracksRegLiveness: true body: | + ; GCN-LABEL: name: phi_moveimm_input + ; GCN: bb.0: + ; GCN-NEXT: successors: %bb.1(0x80000000) + ; GCN-NEXT: liveins: $sgpr0, $sgpr1 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; GCN-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GCN-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: bb.1: + ; GCN-NEXT: successors: %bb.2(0x80000000) + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY]], [[COPY1]], implicit-def $scc + ; GCN-NEXT: S_BRANCH %bb.2 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: bb.2: + ; GCN-NEXT: successors: %bb.3(0x80000000) + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[PHI:%[0-9]+]]:sreg_32 = PHI %5, %bb.3, [[S_ADD_U32_]], %bb.1 + ; GCN-NEXT: S_BRANCH %bb.3 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: bb.3: + ; GCN-NEXT: successors: %bb.2(0x80000000) + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GCN-NEXT: S_BRANCH %bb.2 bb.0: successors: %bb.1 liveins: $sgpr0, $sgpr1 @@ -33,44 +57,94 @@ body: | ... --- -# GCN-LABEL: name: phi_moveimm_subreg_input -# GCN: %{{[0-9]+}}:sreg_64 = PHI %{{[0-9]+}}, %bb.3, %{{[0-9]+}}, %bb.1 name: phi_moveimm_subreg_input tracksRegLiveness: true body: | + ; GCN-LABEL: name: phi_moveimm_subreg_input + ; GCN: bb.0: + ; GCN-NEXT: successors: %bb.1(0x80000000) + ; GCN-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[V_MOV_B64_e32_:%[0-9]+]]:vreg_64 = V_MOV_B64_e32 0, implicit $exec + ; GCN-NEXT: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1 + ; GCN-NEXT: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: bb.1: + ; GCN-NEXT: successors: %bb.2(0x80000000) + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[S_ADD_U:%[0-9]+]]:sreg_64 = S_ADD_U64_PSEUDO [[COPY]], [[COPY1]], implicit-def $scc + ; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64 = COPY [[S_ADD_U]].sub0, implicit $exec + ; GCN-NEXT: S_BRANCH %bb.2 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: bb.2: + ; GCN-NEXT: successors: %bb.3(0x80000000) + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI [[V_MOV_B64_e32_]].sub0, %bb.3, [[COPY2]], %bb.1 + ; GCN-NEXT: S_BRANCH %bb.3 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: bb.3: + ; GCN-NEXT: successors: %bb.2(0x80000000) + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF + ; GCN-NEXT: S_BRANCH %bb.2 bb.0: successors: %bb.1 - liveins: $sgpr0, $sgpr1 + liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3 - %0:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + %0:vreg_64 = V_MOV_B64_e32 0, implicit $exec - %4:sreg_32 = COPY $sgpr0 - %5:sreg_32 = COPY $sgpr1 + %4:sreg_64 = COPY $sgpr0_sgpr1 + %5:sreg_64 = COPY $sgpr2_sgpr3 bb.1: successors: %bb.2 - undef %2.sub0:sreg_64 = S_ADD_U32 %4, %5, implicit-def $scc + %2:sreg_64 = S_ADD_U64_PSEUDO %4, %5, implicit-def $scc S_BRANCH %bb.2 bb.2: successors: %bb.3 - %3:sreg_64 = PHI %1, %bb.3, %2, %bb.1 + %3:sreg_32 = PHI %1.sub0:sreg_64, %bb.3, %2.sub0:sreg_64, %bb.1 S_BRANCH %bb.3 bb.3: successors: %bb.2 - undef %1.sub0:sreg_64 = COPY %0 + %1:sreg_64 = COPY %0.sub0:vreg_64 S_BRANCH %bb.2 ... 
--- -# GCN-LABEL: name: phi_moveimm_bad_opcode_input -# GCN-NOT: %{{[0-9]+}}:sreg_32 = PHI %{{[0-9]+}}, %bb.3, %{{[0-9]+}}, %bb.1 -# GCN: %{{[0-9]+}}:vgpr_32 = PHI %{{[0-9]+}}, %bb.3, %{{[0-9]+}}, %bb.1 name: phi_moveimm_bad_opcode_input tracksRegLiveness: true body: | + ; GCN-LABEL: name: phi_moveimm_bad_opcode_input + ; GCN: bb.0: + ; GCN-NEXT: successors: %bb.1(0x80000000) + ; GCN-NEXT: liveins: $sgpr0, $sgpr1, $vgpr0 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GCN-NEXT: [[V_MOV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_MOV_B32_sdwa 0, [[COPY]], 0, 5, 2, 4, implicit $exec, implicit [[COPY]](tied-def 0) + ; GCN-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GCN-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: bb.1: + ; GCN-NEXT: successors: %bb.2(0x80000000) + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[S_ADD_U32_:%[0-9]+]]:sreg_32 = S_ADD_U32 [[COPY1]], [[COPY2]], implicit-def $scc + ; GCN-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[S_ADD_U32_]], implicit $exec + ; GCN-NEXT: S_BRANCH %bb.2 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: bb.2: + ; GCN-NEXT: successors: %bb.3(0x80000000) + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI [[V_MOV_B32_sdwa]], %bb.3, [[COPY3]], %bb.1 + ; GCN-NEXT: S_BRANCH %bb.3 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: bb.3: + ; GCN-NEXT: successors: %bb.2(0x80000000) + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF + ; GCN-NEXT: S_BRANCH %bb.2 bb.0: successors: %bb.1 liveins: $sgpr0, $sgpr1, $vgpr0 diff --git a/llvm/test/CodeGen/DirectX/step.ll b/llvm/test/CodeGen/DirectX/step.ll new file mode 100644 index 0000000..0393c15 --- /dev/null +++ b/llvm/test/CodeGen/DirectX/step.ll @@ -0,0 +1,78 @@ +; RUN: opt -S -dxil-intrinsic-expansion < %s | FileCheck %s --check-prefix=CHECK
+; RUN: opt -S -dxil-op-lower -mtriple=dxil-pc-shadermodel6.3-library < %s | FileCheck %s --check-prefix=CHECK
+
+; Make sure dxil operation function calls for step are generated for half and float.
+
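+; Both passes expand step(x, y) into a compare-and-select: an fcmp olt with the
+; operands swapped relative to the call, followed by a select between 0.0 and
+; 1.0 (0xH0000 and 0xH3C00 are the half-precision encodings of 0.0 and 1.0).
+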
+declare half @llvm.dx.step.f16(half, half)
+declare <2 x half> @llvm.dx.step.v2f16(<2 x half>, <2 x half>)
+declare <3 x half> @llvm.dx.step.v3f16(<3 x half>, <3 x half>)
+declare <4 x half> @llvm.dx.step.v4f16(<4 x half>, <4 x half>)
+
+declare float @llvm.dx.step.f32(float, float)
+declare <2 x float> @llvm.dx.step.v2f32(<2 x float>, <2 x float>)
+declare <3 x float> @llvm.dx.step.v3f32(<3 x float>, <3 x float>)
+declare <4 x float> @llvm.dx.step.v4f32(<4 x float>, <4 x float>)
+
+define noundef half @test_step_half(half noundef %p0, half noundef %p1) {
+entry:
+ ; CHECK: %0 = fcmp olt half %p1, %p0
+ ; CHECK: %1 = select i1 %0, half 0xH0000, half 0xH3C00
+ %hlsl.step = call half @llvm.dx.step.f16(half %p0, half %p1)
+ ret half %hlsl.step
+}
+
+define noundef <2 x half> @test_step_half2(<2 x half> noundef %p0, <2 x half> noundef %p1) {
+entry:
+ ; CHECK: %0 = fcmp olt <2 x half> %p1, %p0
+ ; CHECK: %1 = select <2 x i1> %0, <2 x half> zeroinitializer, <2 x half> <half 0xH3C00, half 0xH3C00>
+ %hlsl.step = call <2 x half> @llvm.dx.step.v2f16(<2 x half> %p0, <2 x half> %p1)
+ ret <2 x half> %hlsl.step
+}
+
+define noundef <3 x half> @test_step_half3(<3 x half> noundef %p0, <3 x half> noundef %p1) {
+entry:
+ ; CHECK: %0 = fcmp olt <3 x half> %p1, %p0
+ ; CHECK: %1 = select <3 x i1> %0, <3 x half> zeroinitializer, <3 x half> <half 0xH3C00, half 0xH3C00, half 0xH3C00>
+ %hlsl.step = call <3 x half> @llvm.dx.step.v3f16(<3 x half> %p0, <3 x half> %p1)
+ ret <3 x half> %hlsl.step
+}
+
+define noundef <4 x half> @test_step_half4(<4 x half> noundef %p0, <4 x half> noundef %p1) {
+entry:
+ ; CHECK: %0 = fcmp olt <4 x half> %p1, %p0
+ ; CHECK: %1 = select <4 x i1> %0, <4 x half> zeroinitializer, <4 x half> <half 0xH3C00, half 0xH3C00, half 0xH3C00, half 0xH3C00>
+ %hlsl.step = call <4 x half> @llvm.dx.step.v4f16(<4 x half> %p0, <4 x half> %p1)
+ ret <4 x half> %hlsl.step
+}
+
+define noundef float @test_step_float(float noundef %p0, float noundef %p1) {
+entry:
+ ; CHECK: %0 = fcmp olt float %p1, %p0
+ ; CHECK: %1 = select i1 %0, float 0.000000e+00, float 1.000000e+00
+ %hlsl.step = call float @llvm.dx.step.f32(float %p0, float %p1)
+ ret float %hlsl.step
+}
+
+define noundef <2 x float> @test_step_float2(<2 x float> noundef %p0, <2 x float> noundef %p1) {
+entry:
+ ; CHECK: %0 = fcmp olt <2 x float> %p1, %p0
+ ; CHECK: %1 = select <2 x i1> %0, <2 x float> zeroinitializer, <2 x float> <float 1.000000e+00, float 1.000000e+00>
+ %hlsl.step = call <2 x float> @llvm.dx.step.v2f32(<2 x float> %p0, <2 x float> %p1)
+ ret <2 x float> %hlsl.step
+}
+
+define noundef <3 x float> @test_step_float3(<3 x float> noundef %p0, <3 x float> noundef %p1) {
+entry:
+ ; CHECK: %0 = fcmp olt <3 x float> %p1, %p0
+ ; CHECK: %1 = select <3 x i1> %0, <3 x float> zeroinitializer, <3 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
+ %hlsl.step = call <3 x float> @llvm.dx.step.v3f32(<3 x float> %p0, <3 x float> %p1)
+ ret <3 x float> %hlsl.step
+}
+
+define noundef <4 x float> @test_step_float4(<4 x float> noundef %p0, <4 x float> noundef %p1) {
+entry:
+ ; CHECK: %0 = fcmp olt <4 x float> %p1, %p0
+ ; CHECK: %1 = select <4 x i1> %0, <4 x float> zeroinitializer, <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
+ %hlsl.step = call <4 x float> @llvm.dx.step.v4f32(<4 x float> %p0, <4 x float> %p1)
+ ret <4 x float> %hlsl.step
+}
diff --git a/llvm/test/CodeGen/LoongArch/statepoint-call-lowering-r1.ll b/llvm/test/CodeGen/LoongArch/statepoint-call-lowering-r1.ll new file mode 100644 index 0000000..4a77b2c --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/statepoint-call-lowering-r1.ll @@ -0,0 +1,13 @@ +; RUN: llc --mtriple=loongarch64 --verify-machineinstrs --stop-after=prologepilog < %s | FileCheck %s + +;; Check that the STATEPOINT instruction has an early-clobber implicit def for R1 (LoongArch's return-address register, which the call clobbers). + +define void @test() gc "statepoint-example" { +entry: + %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void ()) @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live" ()] +; CHECK: STATEPOINT 0, 0, 0, target-flags(loongarch-call-plt) @return_i1, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, csr_ilp32s_lp64s, implicit-def $r3, implicit-def dead early-clobber $r1 + ret void +} + +declare void @return_i1() +declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...) diff --git a/llvm/test/CodeGen/LoongArch/statepoint-call-lowering.ll b/llvm/test/CodeGen/LoongArch/statepoint-call-lowering.ll new file mode 100644 index 0000000..6956929 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/statepoint-call-lowering.ll @@ -0,0 +1,228 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc --mtriple=loongarch64 -verify-machineinstrs < %s | FileCheck %s +;; A collection of basic functionality tests for statepoint lowering - most +;; interesting corner cases are exercised through the x86 tests. + +%struct = type { i64, i64 } + +declare zeroext i1 @return_i1() +declare zeroext i32 @return_i32() +declare ptr @return_i32ptr() +declare float @return_float() +declare %struct @return_struct() +declare void @varargf(i32, ...) + +define i1 @test_i1_return() nounwind gc "statepoint-example" { +;; This is just checking that an i1 gets lowered normally when there are no extra +;; state arguments to the statepoint +; CHECK-LABEL: test_i1_return: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi.d $sp, $sp, -16 +; CHECK-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill +; CHECK-NEXT: bl %plt(return_i1) +; CHECK-NEXT: .Ltmp0: +; CHECK-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload +; CHECK-NEXT: addi.d $sp, $sp, 16 +; CHECK-NEXT: ret +entry: + %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0) + %call1 = call zeroext i1 @llvm.experimental.gc.result.i1(token %safepoint_token) + ret i1 %call1 +} + +define i32 @test_i32_return() nounwind gc "statepoint-example" { +; CHECK-LABEL: test_i32_return: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi.d $sp, $sp, -16 +; CHECK-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill +; CHECK-NEXT: bl %plt(return_i32) +; CHECK-NEXT: .Ltmp1: +; CHECK-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload +; CHECK-NEXT: addi.d $sp, $sp, 16 +; CHECK-NEXT: ret +entry: + %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...)
@llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(i32 ()) @return_i32, i32 0, i32 0, i32 0, i32 0) + %call1 = call zeroext i32 @llvm.experimental.gc.result.i32(token %safepoint_token) + ret i32 %call1 +} + +define ptr @test_i32ptr_return() nounwind gc "statepoint-example" { +; CHECK-LABEL: test_i32ptr_return: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi.d $sp, $sp, -16 +; CHECK-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill +; CHECK-NEXT: bl %plt(return_i32ptr) +; CHECK-NEXT: .Ltmp2: +; CHECK-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload +; CHECK-NEXT: addi.d $sp, $sp, 16 +; CHECK-NEXT: ret +entry: + %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(ptr ()) @return_i32ptr, i32 0, i32 0, i32 0, i32 0) + %call1 = call ptr @llvm.experimental.gc.result.p0(token %safepoint_token) + ret ptr %call1 +} + +define float @test_float_return() nounwind gc "statepoint-example" { +; CHECK-LABEL: test_float_return: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi.d $sp, $sp, -16 +; CHECK-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill +; CHECK-NEXT: bl %plt(return_float) +; CHECK-NEXT: .Ltmp3: +; CHECK-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload +; CHECK-NEXT: addi.d $sp, $sp, 16 +; CHECK-NEXT: ret +entry: + %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(float ()) @return_float, i32 0, i32 0, i32 0, i32 0) + %call1 = call float @llvm.experimental.gc.result.f32(token %safepoint_token) + ret float %call1 +} + +define %struct @test_struct_return() nounwind gc "statepoint-example" { +; CHECK-LABEL: test_struct_return: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi.d $sp, $sp, -16 +; CHECK-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill +; CHECK-NEXT: bl %plt(return_struct) +; CHECK-NEXT: .Ltmp4: +; CHECK-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload +; CHECK-NEXT: addi.d $sp, $sp, 16 +; CHECK-NEXT: ret +entry: + %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(%struct ()) @return_struct, i32 0, i32 0, i32 0, i32 0) + %call1 = call %struct @llvm.experimental.gc.result.struct(token %safepoint_token) + ret %struct %call1 +} + +define i1 @test_relocate(ptr addrspace(1) %a) nounwind gc "statepoint-example" { +; CHECK-LABEL: test_relocate: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi.d $sp, $sp, -16 +; CHECK-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill +; CHECK-NEXT: st.d $a0, $sp, 0 +; CHECK-NEXT: bl %plt(return_i1) +; CHECK-NEXT: .Ltmp5: +; CHECK-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload +; CHECK-NEXT: addi.d $sp, $sp, 16 +; CHECK-NEXT: ret +entry: + %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) 
@llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr addrspace(1) %a)] + %call1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0) + %call2 = call zeroext i1 @llvm.experimental.gc.result.i1(token %safepoint_token) + ret i1 %call2 +} + +define void @test_void_vararg() nounwind gc "statepoint-example" { +; CHECK-LABEL: test_void_vararg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi.d $sp, $sp, -16 +; CHECK-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill +; CHECK-NEXT: ori $a0, $zero, 42 +; CHECK-NEXT: ori $a1, $zero, 43 +; CHECK-NEXT: bl %plt(varargf) +; CHECK-NEXT: .Ltmp6: +; CHECK-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload +; CHECK-NEXT: addi.d $sp, $sp, 16 +; CHECK-NEXT: ret +entry: + %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void (i32, ...)) @varargf, i32 2, i32 0, i32 42, i32 43, i32 0, i32 0) + ;; if we try to use the result from a statepoint wrapping a + ;; non-void-returning varargf, we will experience a crash. + ret void +} + +define i1 @test_i1_return_patchable() nounwind gc "statepoint-example" { +;; A patchable variant of test_i1_return +; CHECK-LABEL: test_i1_return_patchable: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi.d $sp, $sp, -16 +; CHECK-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill +; CHECK-NEXT: nop +; CHECK-NEXT: .Ltmp7: +; CHECK-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload +; CHECK-NEXT: addi.d $sp, $sp, 16 +; CHECK-NEXT: ret +entry: + %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 4, ptr elementtype(i1 ()) null, i32 0, i32 0, i32 0, i32 0) + %call1 = call zeroext i1 @llvm.experimental.gc.result.i1(token %safepoint_token) + ret i1 %call1 +} + +declare void @consume(ptr addrspace(1) %obj) + +define i1 @test_cross_bb(ptr addrspace(1) %a, i1 %external_cond) nounwind gc "statepoint-example" { +; CHECK-LABEL: test_cross_bb: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi.d $sp, $sp, -32 +; CHECK-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill +; CHECK-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill +; CHECK-NEXT: andi $fp, $a1, 1 +; CHECK-NEXT: st.d $a0, $sp, 8 +; CHECK-NEXT: bl %plt(return_i1) +; CHECK-NEXT: .Ltmp8: +; CHECK-NEXT: beqz $fp, .LBB8_2 +; CHECK-NEXT: # %bb.1: # %left +; CHECK-NEXT: ld.d $a1, $sp, 8 +; CHECK-NEXT: move $fp, $a0 +; CHECK-NEXT: move $a0, $a1 +; CHECK-NEXT: bl %plt(consume) +; CHECK-NEXT: move $a0, $fp +; CHECK-NEXT: b .LBB8_3 +; CHECK-NEXT: .LBB8_2: # %right +; CHECK-NEXT: ori $a0, $zero, 1 +; CHECK-NEXT: .LBB8_3: # %right +; CHECK-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload +; CHECK-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload +; CHECK-NEXT: addi.d $sp, $sp, 32 +; CHECK-NEXT: ret +entry: + %safepoint_token = tail call token (i64, i32, ptr, i32, i32, ...) 
@llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(i1 ()) @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live" (ptr addrspace(1) %a)] + br i1 %external_cond, label %left, label %right + +left: + %call1 = call ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token %safepoint_token, i32 0, i32 0) + %call2 = call zeroext i1 @llvm.experimental.gc.result.i1(token %safepoint_token) + call void @consume(ptr addrspace(1) %call1) + ret i1 %call2 + +right: + ret i1 true +} + +%struct2 = type { i64, i64, i64 } + +declare void @consume_attributes(i32, ptr nest, i32, ptr byval(%struct2)) + +define void @test_attributes(ptr byval(%struct2) %s) nounwind gc "statepoint-example" { +; CHECK-LABEL: test_attributes: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi.d $sp, $sp, -32 +; CHECK-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill +; CHECK-NEXT: ld.d $a1, $a0, 16 +; CHECK-NEXT: st.d $a1, $sp, 16 +; CHECK-NEXT: ld.d $a1, $a0, 8 +; CHECK-NEXT: st.d $a1, $sp, 8 +; CHECK-NEXT: ld.d $a0, $a0, 0 +; CHECK-NEXT: st.d $a0, $sp, 0 +; CHECK-NEXT: ori $a0, $zero, 42 +; CHECK-NEXT: ori $a2, $zero, 17 +; CHECK-NEXT: addi.d $a3, $sp, 0 +; CHECK-NEXT: move $a1, $zero +; CHECK-NEXT: bl %plt(consume_attributes) +; CHECK-NEXT: .Ltmp9: +; CHECK-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload +; CHECK-NEXT: addi.d $sp, $sp, 32 +; CHECK-NEXT: ret +entry: +;; We call a function that has a nest argument and a byval argument. + %statepoint_token = call token (i64, i32, ptr, i32, i32, ...) @llvm.experimental.gc.statepoint.p0(i64 0, i32 0, ptr elementtype(void (i32, ptr, i32, ptr)) @consume_attributes, i32 4, i32 0, i32 42, ptr nest null, i32 17, ptr byval(%struct2) %s, i32 0, i32 0) + ret void +} + +declare token @llvm.experimental.gc.statepoint.p0(i64, i32, ptr, i32, i32, ...) +declare i1 @llvm.experimental.gc.result.i1(token) +declare i32 @llvm.experimental.gc.result.i32(token) +declare ptr @llvm.experimental.gc.result.p0(token) +declare float @llvm.experimental.gc.result.f32(token) +declare %struct @llvm.experimental.gc.result.struct(token) +declare ptr addrspace(1) @llvm.experimental.gc.relocate.p1(token, i32, i32) diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/underaligned-load-store.ll b/llvm/test/CodeGen/Mips/Fast-ISel/underaligned-load-store.ll new file mode 100644 index 0000000..5a93ece --- /dev/null +++ b/llvm/test/CodeGen/Mips/Fast-ISel/underaligned-load-store.ll @@ -0,0 +1,109 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -march mips -fast-isel -relocation-model pic | FileCheck %s -check-prefixes=MIPS-32 +; RUN: llc < %s -march mips64 -fast-isel -relocation-model pic | FileCheck %s -check-prefixes=MIPS-64 +; RUN: llc < %s -march mips -mcpu mips32r6 -fast-isel -relocation-model pic | FileCheck %s -check-prefixes=MIPS-32-R6 +; RUN: llc < %s -march mips -mcpu mips32r6 -mattr +strict-align -fast-isel -relocation-model pic | FileCheck %s -check-prefixes=MIPS-32-R6-STRICT +; RUN: llc < %s -march mips64 -mcpu mips64r6 -fast-isel -relocation-model pic | FileCheck %s -check-prefixes=MIPS-64-R6 +; RUN: llc < %s -march mips64 -mcpu mips64r6 -mattr +strict-align -fast-isel -relocation-model pic | FileCheck %s -check-prefixes=MIPS-64-R6-STRICT + +@var = external global i32, align 1 + +; FastISel should bail on the underaligned load and store, except on r6 with non-strict alignment. 
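+; (A sketch of the expected strategies, matching the check lines below: pre-R6
+; cores express the misaligned i32 access with the lwl/lwr and swl/swr pairs;
+; R6 dropped those instructions, so it emits plain lw/sw when misaligned
+; accesses are permitted and falls back to byte-wise lbu/sb plus shifts under
+; +strict-align.)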
+define dso_local ccc i32 @__start() { +; MIPS-32-LABEL: __start: +; MIPS-32: # %bb.0: +; MIPS-32-NEXT: lui $2, %hi(_gp_disp) +; MIPS-32-NEXT: addiu $2, $2, %lo(_gp_disp) +; MIPS-32-NEXT: addu $1, $2, $25 +; MIPS-32-NEXT: lw $1, %got(var)($1) +; MIPS-32-NEXT: lwl $2, 0($1) +; MIPS-32-NEXT: lwr $2, 3($1) +; MIPS-32-NEXT: addiu $3, $zero, 42 +; MIPS-32-NEXT: swl $3, 0($1) +; MIPS-32-NEXT: jr $ra +; MIPS-32-NEXT: swr $3, 3($1) +; +; MIPS-64-LABEL: __start: +; MIPS-64: # %bb.0: +; MIPS-64-NEXT: lui $1, %hi(%neg(%gp_rel(__start))) +; MIPS-64-NEXT: daddu $1, $1, $25 +; MIPS-64-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(__start))) +; MIPS-64-NEXT: ld $1, %got_disp(var)($1) +; MIPS-64-NEXT: lwl $2, 0($1) +; MIPS-64-NEXT: lwr $2, 3($1) +; MIPS-64-NEXT: addiu $3, $zero, 42 +; MIPS-64-NEXT: swl $3, 0($1) +; MIPS-64-NEXT: jr $ra +; MIPS-64-NEXT: swr $3, 3($1) +; +; MIPS-32-R6-LABEL: __start: +; MIPS-32-R6: # %bb.0: +; MIPS-32-R6-NEXT: lui $2, %hi(_gp_disp) +; MIPS-32-R6-NEXT: addiu $2, $2, %lo(_gp_disp) +; MIPS-32-R6-NEXT: addu $1, $2, $25 +; MIPS-32-R6-NEXT: lw $1, %got(var)($1) +; MIPS-32-R6-NEXT: lw $2, 0($1) +; MIPS-32-R6-NEXT: addiu $3, $zero, 42 +; MIPS-32-R6-NEXT: jr $ra +; MIPS-32-R6-NEXT: sw $3, 0($1) +; +; MIPS-32-R6-STRICT-LABEL: __start: +; MIPS-32-R6-STRICT: # %bb.0: +; MIPS-32-R6-STRICT-NEXT: lui $2, %hi(_gp_disp) +; MIPS-32-R6-STRICT-NEXT: addiu $2, $2, %lo(_gp_disp) +; MIPS-32-R6-STRICT-NEXT: addu $1, $2, $25 +; MIPS-32-R6-STRICT-NEXT: lw $1, %got(var)($1) +; MIPS-32-R6-STRICT-NEXT: lbu $2, 0($1) +; MIPS-32-R6-STRICT-NEXT: lbu $3, 1($1) +; MIPS-32-R6-STRICT-NEXT: lbu $4, 3($1) +; MIPS-32-R6-STRICT-NEXT: lbu $5, 2($1) +; MIPS-32-R6-STRICT-NEXT: addiu $6, $zero, 42 +; MIPS-32-R6-STRICT-NEXT: sb $zero, 2($1) +; MIPS-32-R6-STRICT-NEXT: sb $6, 3($1) +; MIPS-32-R6-STRICT-NEXT: sb $zero, 0($1) +; MIPS-32-R6-STRICT-NEXT: sb $zero, 1($1) +; MIPS-32-R6-STRICT-NEXT: sll $1, $5, 8 +; MIPS-32-R6-STRICT-NEXT: or $1, $1, $4 +; MIPS-32-R6-STRICT-NEXT: sll $3, $3, 16 +; MIPS-32-R6-STRICT-NEXT: sll $2, $2, 24 +; MIPS-32-R6-STRICT-NEXT: or $2, $2, $3 +; MIPS-32-R6-STRICT-NEXT: jr $ra +; MIPS-32-R6-STRICT-NEXT: or $2, $2, $1 +; +; MIPS-64-R6-LABEL: __start: +; MIPS-64-R6: # %bb.0: +; MIPS-64-R6-NEXT: lui $1, %hi(%neg(%gp_rel(__start))) +; MIPS-64-R6-NEXT: daddu $1, $1, $25 +; MIPS-64-R6-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(__start))) +; MIPS-64-R6-NEXT: ld $1, %got_disp(var)($1) +; MIPS-64-R6-NEXT: lw $2, 0($1) +; MIPS-64-R6-NEXT: addiu $3, $zero, 42 +; MIPS-64-R6-NEXT: jr $ra +; MIPS-64-R6-NEXT: sw $3, 0($1) +; +; MIPS-64-R6-STRICT-LABEL: __start: +; MIPS-64-R6-STRICT: # %bb.0: +; MIPS-64-R6-STRICT-NEXT: lui $1, %hi(%neg(%gp_rel(__start))) +; MIPS-64-R6-STRICT-NEXT: daddu $1, $1, $25 +; MIPS-64-R6-STRICT-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(__start))) +; MIPS-64-R6-STRICT-NEXT: ld $1, %got_disp(var)($1) +; MIPS-64-R6-STRICT-NEXT: lbu $2, 0($1) +; MIPS-64-R6-STRICT-NEXT: lbu $3, 1($1) +; MIPS-64-R6-STRICT-NEXT: lbu $4, 3($1) +; MIPS-64-R6-STRICT-NEXT: lbu $5, 2($1) +; MIPS-64-R6-STRICT-NEXT: addiu $6, $zero, 42 +; MIPS-64-R6-STRICT-NEXT: sb $zero, 2($1) +; MIPS-64-R6-STRICT-NEXT: sb $6, 3($1) +; MIPS-64-R6-STRICT-NEXT: sb $zero, 0($1) +; MIPS-64-R6-STRICT-NEXT: sb $zero, 1($1) +; MIPS-64-R6-STRICT-NEXT: sll $1, $5, 8 +; MIPS-64-R6-STRICT-NEXT: or $1, $1, $4 +; MIPS-64-R6-STRICT-NEXT: sll $3, $3, 16 +; MIPS-64-R6-STRICT-NEXT: sll $2, $2, 24 +; MIPS-64-R6-STRICT-NEXT: or $2, $2, $3 +; MIPS-64-R6-STRICT-NEXT: jr $ra +; MIPS-64-R6-STRICT-NEXT: or $2, $2, $1 + %1 = load i32, ptr @var, align 1 + store i32 42, ptr 
@var, align 1 + ret i32 %1 +} diff --git a/llvm/test/CodeGen/Mips/ins.ll b/llvm/test/CodeGen/Mips/ins.ll new file mode 100644 index 0000000..615dc8f --- /dev/null +++ b/llvm/test/CodeGen/Mips/ins.ll @@ -0,0 +1,60 @@ +; RUN: llc -O3 -mcpu=mips32r2 -mtriple=mipsel-linux-gnu < %s -o - \ +; RUN: | FileCheck %s --check-prefixes=MIPS32R2 +; RUN: llc -O3 -mcpu=mips64r2 -march=mips64el < %s \ +; RUN: | FileCheck %s --check-prefixes=MIPS64R2 + +define i32 @or_and_shl(i32 %a, i32 %b) { +; MIPS32R2-LABEL: or_and_shl: +; MIPS32R2: # %bb.0: # %entry +; MIPS32R2-NEXT: ins $4, $5, 31, 1 +; MIPS32R2-NEXT: jr $ra +; MIPS32R2-NEXT: move $2, $4 + +entry: + %shl = shl i32 %b, 31 + %and = and i32 %a, 2147483647 + %or = or i32 %and, %shl + ret i32 %or +} + +define i32 @or_shl_and(i32 %a, i32 %b) { +; MIPS32R2-LABEL: or_shl_and: +; MIPS32R2: # %bb.0: # %entry +; MIPS32R2-NEXT: ins $4, $5, 31, 1 +; MIPS32R2-NEXT: jr $ra +; MIPS32R2-NEXT: move $2, $4 + +entry: + %shl = shl i32 %b, 31 + %and = and i32 %a, 2147483647 + %or = or i32 %shl, %and + ret i32 %or +} + +define i64 @dinsm(i64 %a, i64 %b) { +; MIPS64R2-LABEL: dinsm: +; MIPS64R2: # %bb.0: # %entry +; MIPS64R2-NEXT: dinsm $4, $5, 17, 47 +; MIPS64R2-NEXT: jr $ra +; MIPS64R2-NEXT: move $2, $4 + +entry: + %shl = shl i64 %b, 17 + %and = and i64 %a, 131071 + %or = or i64 %shl, %and + ret i64 %or +} + +define i64 @dinsu(i64 %a, i64 %b) { +; MIPS64R2-LABEL: dinsu: +; MIPS64R2: # %bb.0: # %entry +; MIPS64R2-NEXT: dinsu $4, $5, 35, 29 +; MIPS64R2-NEXT: jr $ra +; MIPS64R2-NEXT: move $2, $4 + +entry: + %shl = shl i64 %b, 35 + %and = and i64 %a, 34359738367 + %or = or i64 %shl, %and + ret i64 %or +} diff --git a/llvm/test/CodeGen/NVPTX/copysign.ll b/llvm/test/CodeGen/NVPTX/copysign.ll new file mode 100644 index 0000000..a6aad1c --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/copysign.ll @@ -0,0 +1,132 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 -verify-machineinstrs | FileCheck %s +; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_20 -verify-machineinstrs | %ptxas-verify %} + +target triple = "nvptx64-nvidia-cuda" +target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64" + +define float @fcopysign_f_f(float %a, float %b) { +; CHECK-LABEL: fcopysign_f_f( +; CHECK: { +; CHECK-NEXT: .reg .f32 %f<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.f32 %f1, [fcopysign_f_f_param_0]; +; CHECK-NEXT: ld.param.f32 %f2, [fcopysign_f_f_param_1]; +; CHECK-NEXT: copysign.f32 %f3, %f2, %f1; +; CHECK-NEXT: st.param.f32 [func_retval0+0], %f3; +; CHECK-NEXT: ret; + %val = call float @llvm.copysign.f32(float %a, float %b) + ret float %val +} + +define double @fcopysign_d_d(double %a, double %b) { +; CHECK-LABEL: fcopysign_d_d( +; CHECK: { +; CHECK-NEXT: .reg .f64 %fd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.f64 %fd1, [fcopysign_d_d_param_0]; +; CHECK-NEXT: ld.param.f64 %fd2, [fcopysign_d_d_param_1]; +; CHECK-NEXT: copysign.f64 %fd3, %fd2, %fd1; +; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd3; +; CHECK-NEXT: ret; + %val = call double @llvm.copysign.f64(double %a, double %b) + ret double %val +} + +define float @fcopysign_f_d(float %a, double %b) { +; CHECK-LABEL: fcopysign_f_d( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .f32 %f<5>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: 
ld.param.f32 %f1, [fcopysign_f_d_param_0]; +; CHECK-NEXT: abs.f32 %f2, %f1; +; CHECK-NEXT: neg.f32 %f3, %f2; +; CHECK-NEXT: ld.param.u64 %rd1, [fcopysign_f_d_param_1]; +; CHECK-NEXT: shr.u64 %rd2, %rd1, 63; +; CHECK-NEXT: and.b64 %rd3, %rd2, 1; +; CHECK-NEXT: setp.eq.b64 %p1, %rd3, 1; +; CHECK-NEXT: selp.f32 %f4, %f3, %f2, %p1; +; CHECK-NEXT: st.param.f32 [func_retval0+0], %f4; +; CHECK-NEXT: ret; + %c = fptrunc double %b to float + %val = call float @llvm.copysign.f32(float %a, float %c) + ret float %val +} + +define float @fcopysign_f_h(float %a, half %b) { +; CHECK-LABEL: fcopysign_f_h( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .f32 %f<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.f32 %f1, [fcopysign_f_h_param_0]; +; CHECK-NEXT: abs.f32 %f2, %f1; +; CHECK-NEXT: neg.f32 %f3, %f2; +; CHECK-NEXT: ld.param.u16 %rs1, [fcopysign_f_h_param_1]; +; CHECK-NEXT: shr.u16 %rs2, %rs1, 15; +; CHECK-NEXT: and.b16 %rs3, %rs2, 1; +; CHECK-NEXT: setp.eq.b16 %p1, %rs3, 1; +; CHECK-NEXT: selp.f32 %f4, %f3, %f2, %p1; +; CHECK-NEXT: st.param.f32 [func_retval0+0], %f4; +; CHECK-NEXT: ret; + %c = fpext half %b to float + %val = call float @llvm.copysign.f32(float %a, float %c) + ret float %val +} + +define double @fcopysign_d_f(double %a, float %b) { +; CHECK-LABEL: fcopysign_d_f( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-NEXT: .reg .f64 %fd<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.f64 %fd1, [fcopysign_d_f_param_0]; +; CHECK-NEXT: abs.f64 %fd2, %fd1; +; CHECK-NEXT: neg.f64 %fd3, %fd2; +; CHECK-NEXT: ld.param.u32 %r1, [fcopysign_d_f_param_1]; +; CHECK-NEXT: shr.u32 %r2, %r1, 31; +; CHECK-NEXT: and.b32 %r3, %r2, 1; +; CHECK-NEXT: setp.eq.b32 %p1, %r3, 1; +; CHECK-NEXT: selp.f64 %fd4, %fd3, %fd2, %p1; +; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd4; +; CHECK-NEXT: ret; + %c = fpext float %b to double + %val = call double @llvm.copysign.f64(double %a, double %c) + ret double %val +} + +define double @fcopysign_d_h(double %a, half %b) { +; CHECK-LABEL: fcopysign_d_h( +; CHECK: { +; CHECK-NEXT: .reg .pred %p<2>; +; CHECK-NEXT: .reg .b16 %rs<4>; +; CHECK-NEXT: .reg .f64 %fd<5>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.f64 %fd1, [fcopysign_d_h_param_0]; +; CHECK-NEXT: abs.f64 %fd2, %fd1; +; CHECK-NEXT: neg.f64 %fd3, %fd2; +; CHECK-NEXT: ld.param.u16 %rs1, [fcopysign_d_h_param_1]; +; CHECK-NEXT: shr.u16 %rs2, %rs1, 15; +; CHECK-NEXT: and.b16 %rs3, %rs2, 1; +; CHECK-NEXT: setp.eq.b16 %p1, %rs3, 1; +; CHECK-NEXT: selp.f64 %fd4, %fd3, %fd2, %p1; +; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd4; +; CHECK-NEXT: ret; + %c = fpext half %b to double + %val = call double @llvm.copysign.f64(double %a, double %c) + ret double %val +} + + +declare float @llvm.copysign.f32(float, float) +declare double @llvm.copysign.f64(double, double) diff --git a/llvm/test/CodeGen/NVPTX/math-intrins.ll b/llvm/test/CodeGen/NVPTX/math-intrins.ll index fcc4ec6e..bdd6c91 100644 --- a/llvm/test/CodeGen/NVPTX/math-intrins.ll +++ b/llvm/test/CodeGen/NVPTX/math-intrins.ll @@ -195,9 +195,8 @@ define double @round_double(double %a) { ; check the use of 0.5 to implement round ; CHECK-LABEL: round_double( ; CHECK: { -; CHECK-NEXT: .reg .pred %p<4>; -; CHECK-NEXT: .reg .b64 %rd<4>; -; CHECK-NEXT: .reg .f64 %fd<10>; +; CHECK-NEXT: .reg .pred %p<3>; +; CHECK-NEXT: .reg .f64 %fd<8>; ; CHECK-EMPTY: ; CHECK-NEXT: // %bb.0: ; CHECK-NEXT: ld.param.f64 %fd1, [round_double_param_0]; @@ 
-206,16 +205,10 @@ define double @round_double(double %a) { ; CHECK-NEXT: add.rn.f64 %fd3, %fd2, 0d3FE0000000000000; ; CHECK-NEXT: cvt.rzi.f64.f64 %fd4, %fd3; ; CHECK-NEXT: selp.f64 %fd5, 0d0000000000000000, %fd4, %p1; -; CHECK-NEXT: abs.f64 %fd6, %fd5; -; CHECK-NEXT: neg.f64 %fd7, %fd6; -; CHECK-NEXT: mov.b64 %rd1, %fd1; -; CHECK-NEXT: shr.u64 %rd2, %rd1, 63; -; CHECK-NEXT: and.b64 %rd3, %rd2, 1; -; CHECK-NEXT: setp.eq.b64 %p2, %rd3, 1; -; CHECK-NEXT: selp.f64 %fd8, %fd7, %fd6, %p2; -; CHECK-NEXT: setp.gt.f64 %p3, %fd2, 0d4330000000000000; -; CHECK-NEXT: selp.f64 %fd9, %fd1, %fd8, %p3; -; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd9; +; CHECK-NEXT: copysign.f64 %fd6, %fd1, %fd5; +; CHECK-NEXT: setp.gt.f64 %p2, %fd2, 0d4330000000000000; +; CHECK-NEXT: selp.f64 %fd7, %fd1, %fd6, %p2; +; CHECK-NEXT: st.param.f64 [func_retval0+0], %fd7; ; CHECK-NEXT: ret; %b = call double @llvm.round.f64(double %a) ret double %b diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll index 4c5097b..e558566 100644 --- a/llvm/test/CodeGen/RISCV/half-convert.ll +++ b/llvm/test/CodeGen/RISCV/half-convert.ll @@ -2033,8 +2033,6 @@ define i64 @fcvt_l_h(half %a) nounwind { ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: slli a0, a0, 16 -; RV32I-NEXT: srli a0, a0, 16 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: call __fixsfdi ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -2792,8 +2790,6 @@ define i64 @fcvt_lu_h(half %a) nounwind { ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: slli a0, a0, 16 -; RV32I-NEXT: srli a0, a0, 16 ; RV32I-NEXT: call __extendhfsf2 ; RV32I-NEXT: call __fixunssfdi ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll index 65a1035..5d75efe 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll @@ -567,16 +567,15 @@ define <8 x i32> @add_constant_rhs_8xi32_partial(<8 x i32> %vin, i32 %a, i32 %b, ; CHECK-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 5 ; CHECK-NEXT: vmv.s.x v10, a2 -; CHECK-NEXT: lui a0, %hi(.LCPI19_0) -; CHECK-NEXT: addi a0, a0, %lo(.LCPI19_0) -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 6 -; CHECK-NEXT: vmv.s.x v10, a3 +; CHECK-NEXT: lui a0, %hi(.LCPI19_0) +; CHECK-NEXT: addi a0, a0, %lo(.LCPI19_0) ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma -; CHECK-NEXT: vslideup.vi v8, v10, 7 -; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: vle32.v v10, (a0) +; CHECK-NEXT: vmv.s.x v12, a3 +; CHECK-NEXT: vslideup.vi v8, v12, 7 +; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: ret %vadd = add <8 x i32> %vin, <i32 1, i32 2, i32 3, i32 5, i32 undef, i32 undef, i32 undef, i32 undef> %e0 = add i32 %a, 23 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll index 14f4f44..24a5bd1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll @@ -2501,9 +2501,9 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1> ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: 
lw a2, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a2 +; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB35_9: # %else14 ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma @@ -2546,9 +2546,9 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1> ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a2 +; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB35_8 ; RV64ZVE32F-NEXT: j .LBB35_9 @@ -2652,9 +2652,9 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a2 +; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB36_9: # %else14 ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma @@ -2697,9 +2697,9 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a2 +; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB36_8 ; RV64ZVE32F-NEXT: j .LBB36_9 @@ -2808,9 +2808,9 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a2 +; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB37_9: # %else14 ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma @@ -2856,9 +2856,9 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a2 +; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB37_8 ; RV64ZVE32F-NEXT: j .LBB37_9 @@ -2966,9 +2966,9 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a2 +; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5 +; RV64ZVE32F-NEXT: 
vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB38_9: # %else14 ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma @@ -3011,9 +3011,9 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a2 +; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB38_8 ; RV64ZVE32F-NEXT: j .LBB38_9 @@ -3118,9 +3118,9 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, < ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a2 +; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB39_9: # %else14 ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma @@ -3163,9 +3163,9 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, < ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a2 +; RV64ZVE32F-NEXT: vmv.s.x v12, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB39_8 ; RV64ZVE32F-NEXT: j .LBB39_9 @@ -3275,9 +3275,9 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, < ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: lw a3, 0(a3) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a3 +; RV64ZVE32F-NEXT: vmv.s.x v12, a3 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB40_9: # %else14 ; RV64ZVE32F-NEXT: andi a3, a2, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma @@ -3323,9 +3323,9 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, < ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: lw a3, 0(a3) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma -; RV64ZVE32F-NEXT: vmv.s.x v8, a3 +; RV64ZVE32F-NEXT: vmv.s.x v12, a3 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a3, a2, 32 ; RV64ZVE32F-NEXT: bnez a3, .LBB40_8 ; RV64ZVE32F-NEXT: j .LBB40_9 @@ -8200,9 +8200,9 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw fa5, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB74_9: # %else14 ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma @@ -8245,9 +8245,9 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, 
<8 x i ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw fa5, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB74_8 ; RV64ZVE32F-NEXT: j .LBB74_9 @@ -8351,9 +8351,9 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, < ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw fa5, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB75_9: # %else14 ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma @@ -8396,9 +8396,9 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, < ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw fa5, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB75_8 ; RV64ZVE32F-NEXT: j .LBB75_9 @@ -8507,9 +8507,9 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, < ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw fa5, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB76_9: # %else14 ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma @@ -8555,9 +8555,9 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, < ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw fa5, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB76_8 ; RV64ZVE32F-NEXT: j .LBB76_9 @@ -8665,9 +8665,9 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw fa5, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB77_9: # %else14 ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma @@ -8710,9 +8710,9 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw fa5, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 ; 
RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB77_8 ; RV64ZVE32F-NEXT: j .LBB77_9 @@ -8817,9 +8817,9 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs, ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw fa5, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB78_9: # %else14 ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma @@ -8862,9 +8862,9 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs, ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw fa5, 0(a2) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB78_8 ; RV64ZVE32F-NEXT: j .LBB78_9 @@ -8974,9 +8974,9 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs, ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: flw fa5, 0(a3) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB79_9: # %else14 ; RV64ZVE32F-NEXT: andi a3, a2, 64 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma @@ -9022,9 +9022,9 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs, ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: flw fa5, 0(a3) ; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma -; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5 +; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma -; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4 +; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a3, a2, 32 ; RV64ZVE32F-NEXT: bnez a3, .LBB79_8 ; RV64ZVE32F-NEXT: j .LBB79_9 diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll index efc17b7..28f7eb4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh | FileCheck %s -; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh | FileCheck %s +; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh,+zvfbfmin | FileCheck %s +; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh,+zvfbfmin | FileCheck %s +; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin,+zvfbfmin | FileCheck %s +; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin,+zvfbfmin | FileCheck %s ; Integers @@ -255,6 +257,18 @@ declare {<vscale x 8 x i64>, <vscale x 8 x i64>} @llvm.vector.deinterleave2.nxv1 ; Floats +define {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @vector_deinterleave_nxv2bf16_nxv4bf16(<vscale x 4 x bfloat> %vec) { +; 
CHECK-LABEL: vector_deinterleave_nxv2bf16_nxv4bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vnsrl.wi v10, v8, 0 +; CHECK-NEXT: vnsrl.wi v9, v8, 16 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +%retval = call {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>} @llvm.vector.deinterleave2.nxv4bf16(<vscale x 4 x bfloat> %vec) +ret {<vscale x 2 x bfloat>, <vscale x 2 x bfloat>} %retval +} + define {<vscale x 2 x half>, <vscale x 2 x half>} @vector_deinterleave_nxv2f16_nxv4f16(<vscale x 4 x half> %vec) { ; CHECK-LABEL: vector_deinterleave_nxv2f16_nxv4f16: ; CHECK: # %bb.0: @@ -267,6 +281,19 @@ define {<vscale x 2 x half>, <vscale x 2 x half>} @vector_deinterleave_nxv2f16_n ret {<vscale x 2 x half>, <vscale x 2 x half>} %retval } +define {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @vector_deinterleave_nxv4bf16_nxv8bf16(<vscale x 8 x bfloat> %vec) { +; CHECK-LABEL: vector_deinterleave_nxv4bf16_nxv8bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vnsrl.wi v10, v8, 0 +; CHECK-NEXT: vnsrl.wi v11, v8, 16 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: vmv.v.v v9, v11 +; CHECK-NEXT: ret +%retval = call {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>} @llvm.vector.deinterleave2.nxv8bf16(<vscale x 8 x bfloat> %vec) +ret {<vscale x 4 x bfloat>, <vscale x 4 x bfloat>} %retval +} + define {<vscale x 4 x half>, <vscale x 4 x half>} @vector_deinterleave_nxv4f16_nxv8f16(<vscale x 8 x half> %vec) { ; CHECK-LABEL: vector_deinterleave_nxv4f16_nxv8f16: ; CHECK: # %bb.0: @@ -294,6 +321,19 @@ define {<vscale x 2 x float>, <vscale x 2 x float>} @vector_deinterleave_nxv2f32 ret {<vscale x 2 x float>, <vscale x 2 x float>} %retval } +define {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @vector_deinterleave_nxv8bf16_nxv16bf16(<vscale x 16 x bfloat> %vec) { +; CHECK-LABEL: vector_deinterleave_nxv8bf16_nxv16bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vnsrl.wi v12, v8, 0 +; CHECK-NEXT: vnsrl.wi v14, v8, 16 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: vmv.v.v v10, v14 +; CHECK-NEXT: ret +%retval = call {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} @llvm.vector.deinterleave2.nxv16bf16(<vscale x 16 x bfloat> %vec) +ret {<vscale x 8 x bfloat>, <vscale x 8 x bfloat>} %retval +} + define {<vscale x 8 x half>, <vscale x 8 x half>} @vector_deinterleave_nxv8f16_nxv16f16(<vscale x 16 x half> %vec) { ; CHECK-LABEL: vector_deinterleave_nxv8f16_nxv16f16: ; CHECK: # %bb.0: @@ -344,6 +384,21 @@ declare {<vscale x 8 x half>, <vscale x 8 x half>} @llvm.vector.deinterleave2.nx declare {<vscale x 4 x float>, <vscale x 4 x float>} @llvm.vector.deinterleave2.nxv8f32(<vscale x 8 x float>) declare {<vscale x 2 x double>, <vscale x 2 x double>} @llvm.vector.deinterleave2.nxv4f64(<vscale x 4 x double>) +define {<vscale x 32 x bfloat>, <vscale x 32 x bfloat>} @vector_deinterleave_nxv32bf16_nxv64bf16(<vscale x 64 x bfloat> %vec) { +; CHECK-LABEL: vector_deinterleave_nxv32bf16_nxv64bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vmv8r.v v24, v8 +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vnsrl.wi v8, v24, 0 +; CHECK-NEXT: vnsrl.wi v12, v16, 0 +; CHECK-NEXT: vnsrl.wi v0, v24, 16 +; CHECK-NEXT: vnsrl.wi v4, v16, 16 +; CHECK-NEXT: vmv8r.v v16, v0 +; CHECK-NEXT: ret +%retval = call {<vscale x 32 x bfloat>, <vscale x 32 x bfloat>} @llvm.vector.deinterleave2.nxv64bf16(<vscale x 64 x bfloat> %vec) +ret {<vscale x 32 x bfloat>, <vscale x 32 x bfloat>} %retval +} + define {<vscale x 32 x half>, <vscale x 32 x half>} 
@vector_deinterleave_nxv32f16_nxv64f16(<vscale x 64 x half> %vec) { ; CHECK-LABEL: vector_deinterleave_nxv32f16_nxv64f16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll index 2e9f62e..83c235d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll @@ -1,8 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zfh,+zvfh | FileCheck %s -; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfh,+zvfh | FileCheck %s -; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zvbb,+zfh,+zvfh | FileCheck %s --check-prefix=ZVBB -; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvbb,+zfh,+zvfh | FileCheck %s --check-prefix=ZVBB +; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zfh,+zvfh,+zvfbfmin | FileCheck %s +; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfh,+zvfh,+zvfbfmin | FileCheck %s +; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zfh,+zvfhmin,+zvfbfmin | FileCheck %s +; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfh,+zvfhmin,+zvfbfmin | FileCheck %s +; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zvbb,+zfh,+zvfh,+zvfbfmin | FileCheck %s --check-prefix=ZVBB +; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvbb,+zfh,+zvfh,+zvfbfmin | FileCheck %s --check-prefix=ZVBB ; Integers @@ -364,6 +366,62 @@ declare <vscale x 16 x i64> @llvm.vector.interleave2.nxv16i64(<vscale x 8 x i64> ; Floats +define <vscale x 4 x bfloat> @vector_interleave_nxv4bf16_nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b) { +; CHECK-LABEL: vector_interleave_nxv4bf16_nxv2bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vwaddu.vv v10, v8, v9 +; CHECK-NEXT: li a0, -1 +; CHECK-NEXT: vwmaccu.vx v10, a0, v9 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: srli a0, a0, 2 +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vslidedown.vx v8, v10, a0 +; CHECK-NEXT: add a1, a0, a0 +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vslideup.vx v10, v8, a0 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv4bf16_nxv2bf16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; ZVBB-NEXT: vwsll.vi v10, v9, 16 +; ZVBB-NEXT: vwaddu.wv v10, v10, v8 +; ZVBB-NEXT: csrr a0, vlenb +; ZVBB-NEXT: srli a0, a0, 2 +; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; ZVBB-NEXT: vslidedown.vx v8, v10, a0 +; ZVBB-NEXT: add a1, a0, a0 +; ZVBB-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; ZVBB-NEXT: vslideup.vx v10, v8, a0 +; ZVBB-NEXT: vmv.v.v v8, v10 +; ZVBB-NEXT: ret + %res = call <vscale x 4 x bfloat> @llvm.vector.interleave2.nxv4bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b) + ret <vscale x 4 x bfloat> %res +} + +define <vscale x 8 x bfloat> @vector_interleave_nxv8bf16_nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b) { +; CHECK-LABEL: vector_interleave_nxv8bf16_nxv4bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vwaddu.vv v10, v8, v9 +; CHECK-NEXT: li a0, -1 +; CHECK-NEXT: vwmaccu.vx v10, a0, v9 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv8bf16_nxv4bf16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; ZVBB-NEXT: vwsll.vi v10, v9, 16 +; ZVBB-NEXT: vwaddu.wv v10, v10, v8 +; ZVBB-NEXT: vmv2r.v v8, v10 +; ZVBB-NEXT: ret + %res = call <vscale x 8 x bfloat> @llvm.vector.interleave2.nxv8bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> 
%b) + ret <vscale x 8 x bfloat> %res +} + define <vscale x 4 x half> @vector_interleave_nxv4f16_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b) { ; CHECK-LABEL: vector_interleave_nxv4f16_nxv2f16: ; CHECK: # %bb.0: @@ -442,6 +500,27 @@ define <vscale x 4 x float> @vector_interleave_nxv4f32_nxv2f32(<vscale x 2 x flo ret <vscale x 4 x float> %res } +define <vscale x 16 x bfloat> @vector_interleave_nxv16bf16_nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) { +; CHECK-LABEL: vector_interleave_nxv16bf16_nxv8bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vwaddu.vv v12, v8, v10 +; CHECK-NEXT: li a0, -1 +; CHECK-NEXT: vwmaccu.vx v12, a0, v10 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv16bf16_nxv8bf16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; ZVBB-NEXT: vwsll.vi v12, v10, 16 +; ZVBB-NEXT: vwaddu.wv v12, v12, v8 +; ZVBB-NEXT: vmv4r.v v8, v12 +; ZVBB-NEXT: ret + %res = call <vscale x 16 x bfloat> @llvm.vector.interleave2.nxv16bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) + ret <vscale x 16 x bfloat> %res +} + define <vscale x 16 x half> @vector_interleave_nxv16f16_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) { ; CHECK-LABEL: vector_interleave_nxv16f16_nxv8f16: ; CHECK: # %bb.0: @@ -527,6 +606,33 @@ declare <vscale x 16 x half> @llvm.vector.interleave2.nxv16f16(<vscale x 8 x hal declare <vscale x 8 x float> @llvm.vector.interleave2.nxv8f32(<vscale x 4 x float>, <vscale x 4 x float>) declare <vscale x 4 x double> @llvm.vector.interleave2.nxv4f64(<vscale x 2 x double>, <vscale x 2 x double>) +define <vscale x 64 x bfloat> @vector_interleave_nxv64bf16_nxv32bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b) { +; CHECK-LABEL: vector_interleave_nxv64bf16_nxv32bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: vmv8r.v v24, v8 +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vwaddu.vv v8, v24, v16 +; CHECK-NEXT: li a0, -1 +; CHECK-NEXT: vwmaccu.vx v8, a0, v16 +; CHECK-NEXT: vwaddu.vv v0, v28, v20 +; CHECK-NEXT: vwmaccu.vx v0, a0, v20 +; CHECK-NEXT: vmv8r.v v16, v0 +; CHECK-NEXT: ret +; +; ZVBB-LABEL: vector_interleave_nxv64bf16_nxv32bf16: +; ZVBB: # %bb.0: +; ZVBB-NEXT: vmv8r.v v24, v8 +; ZVBB-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; ZVBB-NEXT: vwsll.vi v8, v16, 16 +; ZVBB-NEXT: vwaddu.wv v8, v8, v24 +; ZVBB-NEXT: vwsll.vi v0, v20, 16 +; ZVBB-NEXT: vwaddu.wv v0, v0, v28 +; ZVBB-NEXT: vmv8r.v v16, v0 +; ZVBB-NEXT: ret + %res = call <vscale x 64 x bfloat> @llvm.vector.interleave2.nxv64bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b) + ret <vscale x 64 x bfloat> %res +} + define <vscale x 64 x half> @vector_interleave_nxv64f16_nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b) { ; CHECK-LABEL: vector_interleave_nxv64f16_nxv32f16: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode-bf16.ll deleted file mode 100644 index 1c6a57d..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode-bf16.ll +++ /dev/null @@ -1,75 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zvfbfmin -verify-machineinstrs | FileCheck %s -; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvfbfmin -verify-machineinstrs | FileCheck %s - -define <vscale x 1 x bfloat> @nxv1bf16(<vscale x 1 x bfloat> %v) { -; CHECK-LABEL: nxv1bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 8 -; CHECK-NEXT: addi a0, 
a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret - %r = call <vscale x 1 x bfloat> @llvm.fabs.nxv1bf16(<vscale x 1 x bfloat> %v) - ret <vscale x 1 x bfloat> %r -} - -define <vscale x 2 x bfloat> @nxv2bf16(<vscale x 2 x bfloat> %v) { -; CHECK-LABEL: nxv2bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 8 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret - %r = call <vscale x 2 x bfloat> @llvm.fabs.nxv2bf16(<vscale x 2 x bfloat> %v) - ret <vscale x 2 x bfloat> %r -} - -define <vscale x 4 x bfloat> @nxv4bf16(<vscale x 4 x bfloat> %v) { -; CHECK-LABEL: nxv4bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 8 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret - %r = call <vscale x 4 x bfloat> @llvm.fabs.nxv4bf16(<vscale x 4 x bfloat> %v) - ret <vscale x 4 x bfloat> %r -} - -define <vscale x 8 x bfloat> @nxv8bf16(<vscale x 8 x bfloat> %v) { -; CHECK-LABEL: nxv8bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 8 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret - %r = call <vscale x 8 x bfloat> @llvm.fabs.nxv8bf16(<vscale x 8 x bfloat> %v) - ret <vscale x 8 x bfloat> %r -} - -define <vscale x 16 x bfloat> @nxv16bf16(<vscale x 16 x bfloat> %v) { -; CHECK-LABEL: nxv16bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 8 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret - %r = call <vscale x 16 x bfloat> @llvm.fabs.nxv16bf16(<vscale x 16 x bfloat> %v) - ret <vscale x 16 x bfloat> %r -} - -define <vscale x 32 x bfloat> @nxv32bf16(<vscale x 32 x bfloat> %v) { -; CHECK-LABEL: nxv32bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 8 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: ret - %r = call <vscale x 32 x bfloat> @llvm.fabs.nxv32bf16(<vscale x 32 x bfloat> %v) - ret <vscale x 32 x bfloat> %r -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll index 4bf9ae1..c8313c9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll @@ -1,12 +1,88 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH -; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH -; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+zvfbfmin,+v \ +; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ +; RUN: --check-prefixes=CHECK,ZVFH +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+zvfbfmin,+v \ +; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ +; RUN: --check-prefixes=CHECK,ZVFH +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zvfbfmin,+v \ +; RUN: 
-target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ +; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zvfbfmin,+v \ +; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ +; RUN: --check-prefixes=CHECK,ZVFHMIN + +define <vscale x 1 x bfloat> @nxv1bf16(<vscale x 1 x bfloat> %v) { +; CHECK-LABEL: nxv1bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, 8 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret + %r = call <vscale x 1 x bfloat> @llvm.fabs.nxv1bf16(<vscale x 1 x bfloat> %v) + ret <vscale x 1 x bfloat> %r +} + +define <vscale x 2 x bfloat> @nxv2bf16(<vscale x 2 x bfloat> %v) { +; CHECK-LABEL: nxv2bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, 8 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret + %r = call <vscale x 2 x bfloat> @llvm.fabs.nxv2bf16(<vscale x 2 x bfloat> %v) + ret <vscale x 2 x bfloat> %r +} + +define <vscale x 4 x bfloat> @nxv4bf16(<vscale x 4 x bfloat> %v) { +; CHECK-LABEL: nxv4bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, 8 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret + %r = call <vscale x 4 x bfloat> @llvm.fabs.nxv4bf16(<vscale x 4 x bfloat> %v) + ret <vscale x 4 x bfloat> %r +} + +define <vscale x 8 x bfloat> @nxv8bf16(<vscale x 8 x bfloat> %v) { +; CHECK-LABEL: nxv8bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, 8 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret + %r = call <vscale x 8 x bfloat> @llvm.fabs.nxv8bf16(<vscale x 8 x bfloat> %v) + ret <vscale x 8 x bfloat> %r +} + +define <vscale x 16 x bfloat> @nxv16bf16(<vscale x 16 x bfloat> %v) { +; CHECK-LABEL: nxv16bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, 8 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret + %r = call <vscale x 16 x bfloat> @llvm.fabs.nxv16bf16(<vscale x 16 x bfloat> %v) + ret <vscale x 16 x bfloat> %r +} + +define <vscale x 32 x bfloat> @nxv32bf16(<vscale x 32 x bfloat> %v) { +; CHECK-LABEL: nxv32bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, 8 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: ret + %r = call <vscale x 32 x bfloat> @llvm.fabs.nxv32bf16(<vscale x 32 x bfloat> %v) + ret <vscale x 32 x bfloat> %r +} declare <vscale x 1 x half> @llvm.fabs.nxv1f16(<vscale x 1 x half>) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode-bf16.ll deleted file mode 100644 index ee050ba..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode-bf16.ll +++ /dev/null @@ -1,87 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zvfbfmin -verify-machineinstrs | FileCheck %s -; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvfbfmin -verify-machineinstrs | FileCheck %s - -define <vscale x 1 x bfloat> @nxv1bf16(<vscale x 1 x bfloat> %vm, <vscale x 1 x bfloat> %vs) { -; CHECK-LABEL: nxv1bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 8 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma -; CHECK-NEXT: vand.vx v9, v9, a0 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vand.vx v8, v8, a0 -; 
CHECK-NEXT: vor.vv v8, v8, v9 -; CHECK-NEXT: ret - %r = call <vscale x 1 x bfloat> @llvm.copysign.nxv1bf16(<vscale x 1 x bfloat> %vm, <vscale x 1 x bfloat> %vs) - ret <vscale x 1 x bfloat> %r -} - -define <vscale x 2 x bfloat> @nxv2bf16(<vscale x 2 x bfloat> %vm, <vscale x 2 x bfloat> %vs) { -; CHECK-LABEL: nxv2bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 8 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma -; CHECK-NEXT: vand.vx v9, v9, a0 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: vor.vv v8, v8, v9 -; CHECK-NEXT: ret - %r = call <vscale x 2 x bfloat> @llvm.copysign.nxv2bf16(<vscale x 2 x bfloat> %vm, <vscale x 2 x bfloat> %vs) - ret <vscale x 2 x bfloat> %r -} - -define <vscale x 4 x bfloat> @nxv4bf16(<vscale x 4 x bfloat> %vm, <vscale x 4 x bfloat> %vs) { -; CHECK-LABEL: nxv4bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 8 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma -; CHECK-NEXT: vand.vx v9, v9, a0 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: vor.vv v8, v8, v9 -; CHECK-NEXT: ret - %r = call <vscale x 4 x bfloat> @llvm.copysign.nxv4bf16(<vscale x 4 x bfloat> %vm, <vscale x 4 x bfloat> %vs) - ret <vscale x 4 x bfloat> %r -} - -define <vscale x 8 x bfloat> @nxv8bf16(<vscale x 8 x bfloat> %vm, <vscale x 8 x bfloat> %vs) { -; CHECK-LABEL: nxv8bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 8 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma -; CHECK-NEXT: vand.vx v10, v10, a0 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: vor.vv v8, v8, v10 -; CHECK-NEXT: ret - %r = call <vscale x 8 x bfloat> @llvm.copysign.nxv8bf16(<vscale x 8 x bfloat> %vm, <vscale x 8 x bfloat> %vs) - ret <vscale x 8 x bfloat> %r -} - -define <vscale x 16 x bfloat> @nxv16bf16(<vscale x 16 x bfloat> %vm, <vscale x 16 x bfloat> %vs) { -; CHECK-LABEL: nxv16bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 8 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma -; CHECK-NEXT: vand.vx v12, v12, a0 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: vor.vv v8, v8, v12 -; CHECK-NEXT: ret - %r = call <vscale x 16 x bfloat> @llvm.copysign.nxv16bf16(<vscale x 16 x bfloat> %vm, <vscale x 16 x bfloat> %vs) - ret <vscale x 16 x bfloat> %r -} - -define <vscale x 32 x bfloat> @nxv32bf32(<vscale x 32 x bfloat> %vm, <vscale x 32 x bfloat> %vs) { -; CHECK-LABEL: nxv32bf32: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 8 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma -; CHECK-NEXT: vand.vx v16, v16, a0 -; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: vor.vv v8, v8, v16 -; CHECK-NEXT: ret - %r = call <vscale x 32 x bfloat> @llvm.copysign.nxv32bf32(<vscale x 32 x bfloat> %vm, <vscale x 32 x bfloat> %vs) - ret <vscale x 32 x bfloat> %r -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll index f37e593..c1e1450 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll @@ -1,12 +1,100 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH -; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH -; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \ -; RUN: 
-verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+zvfbfmin,+v \ +; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ +; RUN: --check-prefixes=CHECK,ZVFH +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+zvfbfmin,+v \ +; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ +; RUN: --check-prefixes=CHECK,ZVFH +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zvfbfmin,+v \ +; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ +; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zvfbfmin,+v \ +; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ +; RUN: --check-prefixes=CHECK,ZVFHMIN + +define <vscale x 1 x bfloat> @nxv1bf16(<vscale x 1 x bfloat> %vm, <vscale x 1 x bfloat> %vs) { +; CHECK-LABEL: nxv1bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, 8 +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: ret + %r = call <vscale x 1 x bfloat> @llvm.copysign.nxv1bf16(<vscale x 1 x bfloat> %vm, <vscale x 1 x bfloat> %vs) + ret <vscale x 1 x bfloat> %r +} + +define <vscale x 2 x bfloat> @nxv2bf16(<vscale x 2 x bfloat> %vm, <vscale x 2 x bfloat> %vs) { +; CHECK-LABEL: nxv2bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, 8 +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: ret + %r = call <vscale x 2 x bfloat> @llvm.copysign.nxv2bf16(<vscale x 2 x bfloat> %vm, <vscale x 2 x bfloat> %vs) + ret <vscale x 2 x bfloat> %r +} + +define <vscale x 4 x bfloat> @nxv4bf16(<vscale x 4 x bfloat> %vm, <vscale x 4 x bfloat> %vs) { +; CHECK-LABEL: nxv4bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, 8 +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: ret + %r = call <vscale x 4 x bfloat> @llvm.copysign.nxv4bf16(<vscale x 4 x bfloat> %vm, <vscale x 4 x bfloat> %vs) + ret <vscale x 4 x bfloat> %r +} + +define <vscale x 8 x bfloat> @nxv8bf16(<vscale x 8 x bfloat> %vm, <vscale x 8 x bfloat> %vs) { +; CHECK-LABEL: nxv8bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, 8 +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; CHECK-NEXT: vand.vx v10, v10, a0 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vor.vv v8, v8, v10 +; CHECK-NEXT: ret + %r = call <vscale x 8 x bfloat> @llvm.copysign.nxv8bf16(<vscale x 8 x bfloat> %vm, <vscale x 8 x bfloat> %vs) + ret <vscale x 8 x bfloat> %r +} + +define <vscale x 16 x bfloat> @nxv16bf16(<vscale x 16 x bfloat> %vm, <vscale x 16 x bfloat> %vs) { +; CHECK-LABEL: nxv16bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, 8 +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; CHECK-NEXT: vand.vx v12, v12, a0 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vor.vv v8, v8, v12 +; CHECK-NEXT: ret + %r = call <vscale x 16 x bfloat> @llvm.copysign.nxv16bf16(<vscale x 16 x bfloat> %vm, <vscale x 16 x bfloat> %vs) + ret <vscale x 16 x bfloat> %r +} + +define <vscale x 32 x 
bfloat> @nxv32bf32(<vscale x 32 x bfloat> %vm, <vscale x 32 x bfloat> %vs) { +; CHECK-LABEL: nxv32bf32: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, 8 +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vor.vv v8, v8, v16 +; CHECK-NEXT: ret + %r = call <vscale x 32 x bfloat> @llvm.copysign.nxv32bf32(<vscale x 32 x bfloat> %vm, <vscale x 32 x bfloat> %vs) + ret <vscale x 32 x bfloat> %r +} declare <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode-bf16.ll deleted file mode 100644 index 8fef7f6..0000000 --- a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode-bf16.ll +++ /dev/null @@ -1,69 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zvfbfmin -verify-machineinstrs | FileCheck %s -; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvfbfmin -verify-machineinstrs | FileCheck %s - -define <vscale x 1 x bfloat> @nxv1bf16(<vscale x 1 x bfloat> %va) { -; CHECK-LABEL: nxv1bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 8 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret - %vb = fneg <vscale x 1 x bfloat> %va - ret <vscale x 1 x bfloat> %vb -} - -define <vscale x 2 x bfloat> @nxv2bf16(<vscale x 2 x bfloat> %va) { -; CHECK-LABEL: nxv2bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 8 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret - %vb = fneg <vscale x 2 x bfloat> %va - ret <vscale x 2 x bfloat> %vb -} - -define <vscale x 4 x bfloat> @nxv4bf16(<vscale x 4 x bfloat> %va) { -; CHECK-LABEL: nxv4bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 8 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret - %vb = fneg <vscale x 4 x bfloat> %va - ret <vscale x 4 x bfloat> %vb -} - -define <vscale x 8 x bfloat> @nxv8bf16(<vscale x 8 x bfloat> %va) { -; CHECK-LABEL: nxv8bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 8 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret - %vb = fneg <vscale x 8 x bfloat> %va - ret <vscale x 8 x bfloat> %vb -} - -define <vscale x 16 x bfloat> @nxv16bf16(<vscale x 16 x bfloat> %va) { -; CHECK-LABEL: nxv16bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 8 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret - %vb = fneg <vscale x 16 x bfloat> %va - ret <vscale x 16 x bfloat> %vb -} - -define <vscale x 32 x bfloat> @nxv32bf16(<vscale x 32 x bfloat> %va) { -; CHECK-LABEL: nxv32bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, 8 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma -; CHECK-NEXT: vxor.vx v8, v8, a0 -; CHECK-NEXT: ret - %vb = fneg <vscale x 32 x bfloat> %va - ret <vscale x 32 x bfloat> %vb -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll index 2991e52..b4ec691 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll @@ -1,12 +1,82 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH -; RUN: llc -mtriple=riscv64 
-mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH -; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN -; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \ -; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+zvfbfmin,+v \ +; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ +; RUN: --check-prefixes=CHECK,ZVFH +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+zvfbfmin,+v \ +; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ +; RUN: --check-prefixes=CHECK,ZVFH +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zvfbfmin,+v \ +; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \ +; RUN: --check-prefixes=CHECK,ZVFHMIN +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zvfbfmin,+v \ +; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \ +; RUN: --check-prefixes=CHECK,ZVFHMIN + +define <vscale x 1 x bfloat> @nxv1bf16(<vscale x 1 x bfloat> %va) { +; CHECK-LABEL: nxv1bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, 8 +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma +; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: ret + %vb = fneg <vscale x 1 x bfloat> %va + ret <vscale x 1 x bfloat> %vb +} + +define <vscale x 2 x bfloat> @nxv2bf16(<vscale x 2 x bfloat> %va) { +; CHECK-LABEL: nxv2bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, 8 +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma +; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: ret + %vb = fneg <vscale x 2 x bfloat> %va + ret <vscale x 2 x bfloat> %vb +} + +define <vscale x 4 x bfloat> @nxv4bf16(<vscale x 4 x bfloat> %va) { +; CHECK-LABEL: nxv4bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, 8 +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma +; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: ret + %vb = fneg <vscale x 4 x bfloat> %va + ret <vscale x 4 x bfloat> %vb +} + +define <vscale x 8 x bfloat> @nxv8bf16(<vscale x 8 x bfloat> %va) { +; CHECK-LABEL: nxv8bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, 8 +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma +; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: ret + %vb = fneg <vscale x 8 x bfloat> %va + ret <vscale x 8 x bfloat> %vb +} + +define <vscale x 16 x bfloat> @nxv16bf16(<vscale x 16 x bfloat> %va) { +; CHECK-LABEL: nxv16bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, 8 +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma +; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: ret + %vb = fneg <vscale x 16 x bfloat> %va + ret <vscale x 16 x bfloat> %vb +} + +define <vscale x 32 x bfloat> @nxv32bf16(<vscale x 32 x bfloat> %va) { +; CHECK-LABEL: nxv32bf16: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, 8 +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: ret + %vb = fneg <vscale x 32 x bfloat> %va + ret <vscale x 32 x bfloat> %vb +} define <vscale x 1 x half> @vfneg_vv_nxv1f16(<vscale x 1 x half> %va) { ; ZVFH-LABEL: vfneg_vv_nxv1f16: diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/step.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/step.ll new file mode 100644 index 0000000..bb50d8c --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/step.ll @@ -0,0 +1,33 @@ +; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; Make sure SPIR-V operation function calls for step are lowered correctly.
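+; Per the GLSL.std.450 spec, Step computes, per component,
+;   step(edge, x) = (x < edge) ? 0.0 : 1.0
+; so, for example, step(<0.5, 0.5>, <0.25, 0.75>) yields <0.0, 1.0>.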
+
+; CHECK-DAG: %[[#op_ext_glsl:]] = OpExtInstImport "GLSL.std.450"
+; CHECK-DAG: %[[#float_32:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#float_16:]] = OpTypeFloat 16
+; CHECK-DAG: %[[#vec4_float_16:]] = OpTypeVector %[[#float_16]] 4
+; CHECK-DAG: %[[#vec4_float_32:]] = OpTypeVector %[[#float_32]] 4
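+; The [[#name:]] patterns capture SPIR-V result IDs numerically, so these
+; checks remain valid whatever concrete IDs the backend assigns.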
+
+define noundef <4 x half> @step_half4(<4 x half> noundef %a, <4 x half> noundef %b) {
+entry:
+ ; CHECK: %[[#]] = OpFunction %[[#vec4_float_16]] None %[[#]]
+ ; CHECK: %[[#arg0:]] = OpFunctionParameter %[[#vec4_float_16]]
+ ; CHECK: %[[#arg1:]] = OpFunctionParameter %[[#vec4_float_16]]
+ ; CHECK: %[[#]] = OpExtInst %[[#vec4_float_16]] %[[#op_ext_glsl]] Step %[[#arg0]] %[[#arg1]]
+ %hlsl.step = call <4 x half> @llvm.spv.step.v4f16(<4 x half> %a, <4 x half> %b)
+ ret <4 x half> %hlsl.step
+}
+
+define noundef <4 x float> @step_float4(<4 x float> noundef %a, <4 x float> noundef %b) {
+entry:
+ ; CHECK: %[[#]] = OpFunction %[[#vec4_float_32]] None %[[#]]
+ ; CHECK: %[[#arg0:]] = OpFunctionParameter %[[#vec4_float_32]]
+ ; CHECK: %[[#arg1:]] = OpFunctionParameter %[[#vec4_float_32]]
+ ; CHECK: %[[#]] = OpExtInst %[[#vec4_float_32]] %[[#op_ext_glsl]] Step %[[#arg0]] %[[#arg1]]
+ %hlsl.step = call <4 x float> @llvm.spv.step.v4f32(<4 x float> %a, <4 x float> %b)
+ ret <4 x float> %hlsl.step
+}
+
+declare <4 x half> @llvm.spv.step.v4f16(<4 x half>, <4 x half>)
+declare <4 x float> @llvm.spv.step.v4f32(<4 x float>, <4 x float>)
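+; A minimal editorial sketch (not part of the checked output): on a target
+; without the GLSL.std.450 extended instruction set, the same per-component
+; semantics could be open-coded with a compare and a select. %edge and %x
+; are illustrative names, not values taken from the tests above:
+;   %lt = fcmp olt <4 x float> %x, %edge
+;   %r  = select <4 x i1> %lt, <4 x float> zeroinitializer,
+;                <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>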
diff --git a/llvm/test/CodeGen/X86/avx10_2_512satcvtds-intrinsics.ll b/llvm/test/CodeGen/X86/avx10_2_512satcvtds-intrinsics.ll new file mode 100644 index 0000000..652c35c --- /dev/null +++ b/llvm/test/CodeGen/X86/avx10_2_512satcvtds-intrinsics.ll @@ -0,0 +1,526 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3 +; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown --show-mc-encoding -mattr=+avx10.2-512 | FileCheck %s --check-prefixes=CHECK,X64 +; RUN: llc < %s -verify-machineinstrs -mtriple=i686-unknown-unknown --show-mc-encoding -mattr=+avx10.2-512 | FileCheck %s --check-prefixes=CHECK,X86 + + +define <8 x i32> @test_int_x86_mask_vcvtt_pd2dqs_512(<8 x double> %x0, <8 x i32> %src, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2dqs_512: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2dqs %zmm0, %ymm1 {%k1} # encoding: [0x62,0xf5,0xfc,0x49,0x6d,0xc8] +; X64-NEXT: vmovaps %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2dqs_512: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2dqs %zmm0, %ymm1 {%k1} # encoding: [0x62,0xf5,0xfc,0x49,0x6d,0xc8] +; X86-NEXT: vmovaps %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx10.mask.vcvttpd2dqs.round.512(<8 x double> %x0, <8 x i32> %src, i8 %mask, i32 4) + ret <8 x i32> %res +} + +define <8 x i32> @test_int_x86_maskz_vcvtt_pd2dqs_512_z(<8 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_pd2dqs_512_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2dqs %zmm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0xfc,0xc9,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_pd2dqs_512_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2dqs %zmm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0xfc,0xc9,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx10.mask.vcvttpd2dqs.round.512(<8 x double> %x0, <8 x i32> zeroinitializer, i8 %mask, i32 4) + ret <8 x i32> %res +} + +define <8 x i32> @test_int_x86_mask_vcvtt_pd2dqs_512_undef(<8 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2dqs_512_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2dqs %zmm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0xfc,0xc9,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2dqs_512_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2dqs %zmm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0xfc,0xc9,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx10.mask.vcvttpd2dqs.round.512(<8 x double> %x0, <8 x i32> undef, i8 %mask, i32 4) + ret <8 x i32> %res +} + +define <8 x i32> @test_int_x86_mask_vcvtt_pd2dqs_512_default(<8 x double>* %x0) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2dqs_512_default: +; X64: # %bb.0: +; X64-NEXT: vcvttpd2dqs (%rdi), %ymm0 # encoding: [0x62,0xf5,0xfc,0x48,0x6d,0x07] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2dqs_512_default: 
+; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; X86-NEXT: vcvttpd2dqs (%eax), %ymm0 # encoding: [0x62,0xf5,0xfc,0x48,0x6d,0x00] +; X86-NEXT: retl # encoding: [0xc3] + %x10 = load <8 x double>, <8 x double> * %x0 + %res = call <8 x i32> @llvm.x86.avx10.mask.vcvttpd2dqs.round.512(<8 x double> %x10, <8 x i32> undef, i8 -1, i32 4) + ret <8 x i32> %res +} +declare <8 x i32> @llvm.x86.avx10.mask.vcvttpd2dqs.round.512(<8 x double>, <8 x i32>, i8 , i32) + +define <8 x i32> @test_int_x86_mask_vcvtt_pd2udqs_512(<8 x double> %x0, <8 x i32> %src, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2udqs_512: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2udqs %zmm0, %ymm1 {%k1} # encoding: [0x62,0xf5,0xfc,0x49,0x6c,0xc8] +; X64-NEXT: vmovaps %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2udqs_512: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2udqs %zmm0, %ymm1 {%k1} # encoding: [0x62,0xf5,0xfc,0x49,0x6c,0xc8] +; X86-NEXT: vmovaps %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx10.mask.vcvttpd2udqs.round.512(<8 x double> %x0, <8 x i32> %src, i8 %mask, i32 4) + ret <8 x i32> %res +} + +define <8 x i32> @test_int_x86_maskz_vcvtt_pd2udqs_512_z(<8 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_pd2udqs_512_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2udqs %zmm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0xfc,0xc9,0x6c,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_pd2udqs_512_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2udqs %zmm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0xfc,0xc9,0x6c,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx10.mask.vcvttpd2udqs.round.512(<8 x double> %x0, <8 x i32> zeroinitializer, i8 %mask, i32 4) + ret <8 x i32> %res +} + +define <8 x i32> @test_int_x86_mask_vcvtt_pd2udqs_512_undef(<8 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2udqs_512_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2udqs %zmm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0xfc,0xc9,0x6c,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2udqs_512_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2udqs %zmm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0xfc,0xc9,0x6c,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx10.mask.vcvttpd2udqs.round.512(<8 x double> %x0, <8 x i32> undef, i8 %mask, i32 4) + ret <8 x i32> %res +} + +define <8 x i32> @test_int_x86_mask_vcvtt_pd2udqs_512_default(<8 x double>* %x0) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2udqs_512_default: +; X64: # %bb.0: +; X64-NEXT: vcvttpd2udqs (%rdi), %ymm0 # encoding: [0x62,0xf5,0xfc,0x48,0x6c,0x07] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2udqs_512_default: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; X86-NEXT: vcvttpd2udqs (%eax), %ymm0 # encoding: 
[0x62,0xf5,0xfc,0x48,0x6c,0x00] +; X86-NEXT: retl # encoding: [0xc3] + %x10 = load <8 x double>, <8 x double> * %x0 + %res = call <8 x i32> @llvm.x86.avx10.mask.vcvttpd2udqs.round.512(<8 x double> %x10, <8 x i32> undef, i8 -1, i32 4) + ret <8 x i32> %res +} +declare <8 x i32> @llvm.x86.avx10.mask.vcvttpd2udqs.round.512(<8 x double>, <8 x i32>, i8 , i32) + +define <8 x i64> @test_int_x86_mask_vcvtt_pd2qqs_512(<8 x double> %x0, <8 x i64> %src, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2qqs_512: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2qqs %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf5,0xfd,0x49,0x6d,0xc8] +; X64-NEXT: vmovaps %zmm1, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2qqs_512: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2qqs %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf5,0xfd,0x49,0x6d,0xc8] +; X86-NEXT: vmovaps %zmm1, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i64> @llvm.x86.avx10.mask.vcvttpd2qqs.round.512(<8 x double> %x0, <8 x i64> %src, i8 %mask, i32 4) + ret <8 x i64> %res +} + +define <8 x i64> @test_int_x86_maskz_vcvtt_pd2qqs_512_z(<8 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_pd2qqs_512_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2qqs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0xc9,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_pd2qqs_512_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2qqs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0xc9,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i64> @llvm.x86.avx10.mask.vcvttpd2qqs.round.512(<8 x double> %x0, <8 x i64> zeroinitializer, i8 %mask, i32 4) + ret <8 x i64> %res +} + +define <8 x i64> @test_int_x86_mask_vcvtt_pd2qqs_512_undef(<8 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2qqs_512_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2qqs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0xc9,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2qqs_512_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2qqs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0xc9,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i64> @llvm.x86.avx10.mask.vcvttpd2qqs.round.512(<8 x double> %x0, <8 x i64> undef, i8 %mask, i32 4) + ret <8 x i64> %res +} + +define <8 x i64> @test_int_x86_mask_vcvtt_pd2qqs_512_default(<8 x double>* %x0) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2qqs_512_default: +; X64: # %bb.0: +; X64-NEXT: vcvttpd2qqs (%rdi), %zmm0 # encoding: [0x62,0xf5,0xfd,0x48,0x6d,0x07] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2qqs_512_default: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; X86-NEXT: vcvttpd2qqs (%eax), %zmm0 # encoding: [0x62,0xf5,0xfd,0x48,0x6d,0x00] +; X86-NEXT: retl # encoding: [0xc3] + %x10 = load <8 x double>, <8 x double>* %x0 + %res = call <8 x i64> @llvm.x86.avx10.mask.vcvttpd2qqs.round.512(<8 x double> 
%x10, <8 x i64> undef, i8 -1, i32 4) + ret <8 x i64> %res +} +declare <8 x i64> @llvm.x86.avx10.mask.vcvttpd2qqs.round.512(<8 x double>, <8 x i64>, i8 , i32) + +define <8 x i64> @test_int_x86_mask_vcvtt_pd2uqqs_512(<8 x double> %x0, <8 x i64> %src, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2uqqs_512: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2uqqs %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf5,0xfd,0x49,0x6c,0xc8] +; X64-NEXT: vmovaps %zmm1, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2uqqs_512: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2uqqs %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf5,0xfd,0x49,0x6c,0xc8] +; X86-NEXT: vmovaps %zmm1, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i64> @llvm.x86.avx10.mask.vcvttpd2uqqs.round.512(<8 x double> %x0, <8 x i64> %src, i8 %mask, i32 4) + ret <8 x i64> %res +} + +define <8 x i64> @test_int_x86_maskz_vcvtt_pd2uqqs_512_z(<8 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_pd2uqqs_512_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2uqqs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0xc9,0x6c,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_pd2uqqs_512_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2uqqs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0xc9,0x6c,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i64> @llvm.x86.avx10.mask.vcvttpd2uqqs.round.512(<8 x double> %x0, <8 x i64> zeroinitializer, i8 %mask, i32 4) + ret <8 x i64> %res +} + +define <8 x i64> @test_int_x86_mask_vcvtt_pd2uqqs_512_undef(<8 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2uqqs_512_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2uqqs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0xc9,0x6c,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2uqqs_512_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2uqqs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0xc9,0x6c,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i64> @llvm.x86.avx10.mask.vcvttpd2uqqs.round.512(<8 x double> %x0, <8 x i64> undef, i8 %mask, i32 4) + ret <8 x i64> %res +} + +define <8 x i64> @test_int_x86_mask_vcvtt_pd2uqqs_512_default(<8 x double>* %x0) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2uqqs_512_default: +; X64: # %bb.0: +; X64-NEXT: vcvttpd2uqqs (%rdi), %zmm0 # encoding: [0x62,0xf5,0xfd,0x48,0x6c,0x07] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2uqqs_512_default: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; X86-NEXT: vcvttpd2uqqs (%eax), %zmm0 # encoding: [0x62,0xf5,0xfd,0x48,0x6c,0x00] +; X86-NEXT: retl # encoding: [0xc3] + %x10 = load <8 x double>, <8 x double>* %x0 + %res = call <8 x i64> @llvm.x86.avx10.mask.vcvttpd2uqqs.round.512(<8 x double> %x10, <8 x i64> undef, i8 -1, i32 4) + ret <8 x i64> %res +} +declare <8 x i64> @llvm.x86.avx10.mask.vcvttpd2uqqs.round.512(<8 x double>, <8 x i64>, i8 , i32) + +define <16 x 
i32> @test_int_x86_mask_vcvtt_ps2dqs_512(<16 x float> %x0, <16 x i32> %src, i16 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2dqs_512: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2dqs %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf5,0x7c,0x49,0x6d,0xc8] +; X64-NEXT: vmovaps %zmm1, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2dqs_512: +; X86: # %bb.0: +; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2dqs %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf5,0x7c,0x49,0x6d,0xc8] +; X86-NEXT: vmovaps %zmm1, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <16 x i32> @llvm.x86.avx10.mask.vcvttps2dqs.round.512(<16 x float> %x0, <16 x i32> %src, i16 %mask, i32 4) + ret <16 x i32> %res +} + +define <16 x i32> @test_int_x86_maskz_vcvtt_ps2dqs_512_z(<16 x float> %x0, i16 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_ps2dqs_512_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2dqs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xc9,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_ps2dqs_512_z: +; X86: # %bb.0: +; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2dqs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xc9,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <16 x i32> @llvm.x86.avx10.mask.vcvttps2dqs.round.512(<16 x float> %x0, <16 x i32> zeroinitializer, i16 %mask, i32 4) + ret <16 x i32> %res +} + +define <16 x i32> @test_int_x86_mask_vcvtt_ps2dqs_512_undef(<16 x float> %x0, i16 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2dqs_512_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2dqs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xc9,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2dqs_512_undef: +; X86: # %bb.0: +; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2dqs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xc9,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <16 x i32> @llvm.x86.avx10.mask.vcvttps2dqs.round.512(<16 x float> %x0, <16 x i32> undef, i16 %mask, i32 4) + ret <16 x i32> %res +} + +define <16 x i32> @test_int_x86_mask_vcvtt_ps2dqs_512_default(<16 x float>* %x0) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2dqs_512_default: +; X64: # %bb.0: +; X64-NEXT: vcvttps2dqs (%rdi), %zmm0 # encoding: [0x62,0xf5,0x7c,0x48,0x6d,0x07] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2dqs_512_default: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; X86-NEXT: vcvttps2dqs (%eax), %zmm0 # encoding: [0x62,0xf5,0x7c,0x48,0x6d,0x00] +; X86-NEXT: retl # encoding: [0xc3] + %x10 = load <16 x float>, <16 x float>* %x0 + %res = call <16 x i32> @llvm.x86.avx10.mask.vcvttps2dqs.round.512(<16 x float> %x10, <16 x i32> undef, i16 -1, i32 4) + ret <16 x i32> %res +} +declare <16 x i32> @llvm.x86.avx10.mask.vcvttps2dqs.round.512(<16 x float>, <16 x i32>, i16 , i32) + +define <16 x i32> @test_int_x86_mask_vcvtt_ps2udqs_512(<16 x float> %x0, <16 x i32> %src, i16 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2udqs_512: +; X64: # %bb.0: +; X64-NEXT: 
kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2udqs %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf5,0x7c,0x49,0x6c,0xc8] +; X64-NEXT: vmovaps %zmm1, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2udqs_512: +; X86: # %bb.0: +; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2udqs %zmm0, %zmm1 {%k1} # encoding: [0x62,0xf5,0x7c,0x49,0x6c,0xc8] +; X86-NEXT: vmovaps %zmm1, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <16 x i32> @llvm.x86.avx10.mask.vcvttps2udqs.round.512(<16 x float> %x0, <16 x i32> %src, i16 %mask, i32 4) + ret <16 x i32> %res +} + +define <16 x i32> @test_int_x86_maskz_vcvtt_ps2udqs_512_z(<16 x float> %x0, i16 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_ps2udqs_512_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2udqs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xc9,0x6c,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_ps2udqs_512_z: +; X86: # %bb.0: +; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2udqs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xc9,0x6c,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <16 x i32> @llvm.x86.avx10.mask.vcvttps2udqs.round.512(<16 x float> %x0, <16 x i32> zeroinitializer, i16 %mask, i32 4) + ret <16 x i32> %res +} + +define <16 x i32> @test_int_x86_mask_vcvtt_ps2udqs_512_undef(<16 x float> %x0, i16 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2udqs_512_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2udqs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xc9,0x6c,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2udqs_512_undef: +; X86: # %bb.0: +; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2udqs %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xc9,0x6c,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <16 x i32> @llvm.x86.avx10.mask.vcvttps2udqs.round.512(<16 x float> %x0, <16 x i32> undef, i16 %mask, i32 4) + ret <16 x i32> %res +} + +define <16 x i32> @test_int_x86_mask_vcvtt_ps2udqs_512_default(<16 x float>* %x0) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2udqs_512_default: +; X64: # %bb.0: +; X64-NEXT: vcvttps2dqs (%rdi), %zmm0 # encoding: [0x62,0xf5,0x7c,0x48,0x6d,0x07] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2udqs_512_default: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; X86-NEXT: vcvttps2dqs (%eax), %zmm0 # encoding: [0x62,0xf5,0x7c,0x48,0x6d,0x00] +; X86-NEXT: retl # encoding: [0xc3] + %x10 = load <16 x float>, <16 x float>* %x0 + %res = call <16 x i32> @llvm.x86.avx10.mask.vcvttps2dqs.round.512(<16 x float> %x10, <16 x i32> undef, i16 -1, i32 4) + ret <16 x i32> %res +} +declare <16 x i32> @llvm.x86.avx10.mask.vcvttps2udqs.round.512(<16 x float>, <16 x i32>, i16 , i32) + +define <8 x i64> @test_int_x86_mask_vcvtt_ps2qqs_512(<8 x float> %x0, <8 x i64> %src, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2qqs_512: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2qqs %ymm0, %zmm1 {%k1} # encoding: [0x62,0xf5,0x7d,0x49,0x6d,0xc8] +; X64-NEXT: vmovaps 
%zmm1, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2qqs_512: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2qqs %ymm0, %zmm1 {%k1} # encoding: [0x62,0xf5,0x7d,0x49,0x6d,0xc8] +; X86-NEXT: vmovaps %zmm1, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i64> @llvm.x86.avx10.mask.vcvttps2qqs.round.512(<8 x float> %x0, <8 x i64> %src, i8 %mask, i32 4) + ret <8 x i64> %res +} + +define <8 x i64> @test_int_x86_maskz_vcvtt_ps2qqs_512_z(<8 x float> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_ps2qqs_512_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2qqs %ymm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xc9,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_ps2qqs_512_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2qqs %ymm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xc9,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i64> @llvm.x86.avx10.mask.vcvttps2qqs.round.512(<8 x float> %x0, <8 x i64> zeroinitializer, i8 %mask, i32 4) + ret <8 x i64> %res +} + +define <8 x i64> @test_int_x86_mask_vcvtt_ps2qqs_512_undef(<8 x float> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2qqs_512_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2qqs %ymm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xc9,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2qqs_512_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2qqs %ymm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xc9,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i64> @llvm.x86.avx10.mask.vcvttps2qqs.round.512(<8 x float> %x0, <8 x i64> undef, i8 %mask, i32 4) + ret <8 x i64> %res +} + +define <8 x i64> @test_int_x86_mask_vcvtt_ps2qqs_512_default(<8 x float> %x0) { +; CHECK-LABEL: test_int_x86_mask_vcvtt_ps2qqs_512_default: +; CHECK: # %bb.0: +; CHECK-NEXT: vcvttps2qqs %ymm0, %zmm0 # encoding: [0x62,0xf5,0x7d,0x48,0x6d,0xc0] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res = call <8 x i64> @llvm.x86.avx10.mask.vcvttps2qqs.round.512(<8 x float> %x0, <8 x i64> undef, i8 -1, i32 4) + ret <8 x i64> %res +} +declare <8 x i64> @llvm.x86.avx10.mask.vcvttps2qqs.round.512(<8 x float>, <8 x i64>, i8 , i32) + +define <8 x i64> @test_int_x86_mask_vcvtt_ps2uqqs_512(<8 x float> %x0, <8 x i64> %src, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2uqqs_512: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2uqqs %ymm0, %zmm1 {%k1} # encoding: [0x62,0xf5,0x7d,0x49,0x6c,0xc8] +; X64-NEXT: vmovaps %zmm1, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2uqqs_512: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2uqqs %ymm0, %zmm1 {%k1} # encoding: [0x62,0xf5,0x7d,0x49,0x6c,0xc8] +; X86-NEXT: vmovaps %zmm1, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i64> 
@llvm.x86.avx10.mask.vcvttps2uqqs.round.512(<8 x float> %x0, <8 x i64> %src, i8 %mask, i32 4) + ret <8 x i64> %res +} + +define <8 x i64> @test_int_x86_maskz_vcvtt_ps2uqqs_512_z(<8 x float> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_ps2uqqs_512_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2uqqs %ymm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xc9,0x6c,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_ps2uqqs_512_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2uqqs %ymm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xc9,0x6c,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i64> @llvm.x86.avx10.mask.vcvttps2uqqs.round.512(<8 x float> %x0, <8 x i64> zeroinitializer, i8 %mask, i32 4) + ret <8 x i64> %res +} + +define <8 x i64> @test_int_x86_mask_vcvtt_ps2uqqs_512_undef(<8 x float> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2uqqs_512_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2uqqs %ymm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xc9,0x6c,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2uqqs_512_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2uqqs %ymm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xc9,0x6c,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i64> @llvm.x86.avx10.mask.vcvttps2uqqs.round.512(<8 x float> %x0, <8 x i64> undef, i8 %mask, i32 4) + ret <8 x i64> %res +} + +define <8 x i64> @test_int_x86_mask_vcvtt_ps2uqqs_512_default(<8 x float> %x0) { +; CHECK-LABEL: test_int_x86_mask_vcvtt_ps2uqqs_512_default: +; CHECK: # %bb.0: +; CHECK-NEXT: vcvttps2uqqs %ymm0, %zmm0 # encoding: [0x62,0xf5,0x7d,0x48,0x6c,0xc0] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res = call <8 x i64> @llvm.x86.avx10.mask.vcvttps2uqqs.round.512(<8 x float> %x0, <8 x i64> undef, i8 -1, i32 4) + ret <8 x i64> %res +} +declare <8 x i64> @llvm.x86.avx10.mask.vcvttps2uqqs.round.512(<8 x float>, <8 x i64>, i8 , i32) diff --git a/llvm/test/CodeGen/X86/avx10_2fptosi_satcvtds.ll b/llvm/test/CodeGen/X86/avx10_2fptosi_satcvtds.ll new file mode 100644 index 0000000..4a6556b --- /dev/null +++ b/llvm/test/CodeGen/X86/avx10_2fptosi_satcvtds.ll @@ -0,0 +1,115 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i686-linux -mattr=+avx10.2-256 | FileCheck %s --check-prefix=X86 +; RUN: llc < %s -mtriple=x86_64-linux -mattr=+avx10.2-256 | FileCheck %s --check-prefix=X64 + +; +; 32-bit float to signed integer +; + +declare i32 @llvm.fptosi.sat.i32.f32 (float) +declare i64 @llvm.fptosi.sat.i64.f32 (float) + +define i32 @test_signed_i32_f32(float %f) nounwind { +; X86-LABEL: test_signed_i32_f32: +; X86: # %bb.0: +; X86-NEXT: vcvttss2sis {{[0-9]+}}(%esp), %eax +; X86-NEXT: retl +; +; X64-LABEL: test_signed_i32_f32: +; X64: # %bb.0: +; X64-NEXT: vcvttss2sis %xmm0, %eax +; X64-NEXT: retq + %x = call i32 @llvm.fptosi.sat.i32.f32(float %f) + ret i32 %x +} + +define i64 @test_signed_i64_f32(float %f) nounwind { +; X86-LABEL: test_signed_i64_f32: +; X86: # %bb.0: +; X86-NEXT: pushl %edi +; X86-NEXT: pushl %esi +; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; X86-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; X86-NEXT: vcvttps2qq %xmm1, %xmm1 +; X86-NEXT: 
vmovd %xmm1, %esi +; X86-NEXT: xorl %ecx, %ecx +; X86-NEXT: vucomiss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-NEXT: cmovbl %ecx, %esi +; X86-NEXT: vpextrd $1, %xmm1, %eax +; X86-NEXT: movl $-2147483648, %edi # imm = 0x80000000 +; X86-NEXT: cmovael %eax, %edi +; X86-NEXT: vucomiss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF +; X86-NEXT: cmovbel %edi, %edx +; X86-NEXT: movl $-1, %eax +; X86-NEXT: cmovbel %esi, %eax +; X86-NEXT: vucomiss %xmm0, %xmm0 +; X86-NEXT: cmovpl %ecx, %eax +; X86-NEXT: cmovpl %ecx, %edx +; X86-NEXT: popl %esi +; X86-NEXT: popl %edi +; X86-NEXT: retl +; +; X64-LABEL: test_signed_i64_f32: +; X64: # %bb.0: +; X64-NEXT: vcvttss2sis %xmm0, %rax +; X64-NEXT: retq + %x = call i64 @llvm.fptosi.sat.i64.f32(float %f) + ret i64 %x +} + +; +; 64-bit float to signed integer +; + +declare i32 @llvm.fptosi.sat.i32.f64 (double) +declare i64 @llvm.fptosi.sat.i64.f64 (double) + +define i32 @test_signed_i32_f64(double %f) nounwind { +; X86-LABEL: test_signed_i32_f64: +; X86: # %bb.0: +; X86-NEXT: vcvttsd2sis {{[0-9]+}}(%esp), %eax +; X86-NEXT: retl +; +; X64-LABEL: test_signed_i32_f64: +; X64: # %bb.0: +; X64-NEXT: vcvttsd2sis %xmm0, %eax +; X64-NEXT: retq + %x = call i32 @llvm.fptosi.sat.i32.f64(double %f) + ret i32 %x +} + +define i64 @test_signed_i64_f64(double %f) nounwind { +; X86-LABEL: test_signed_i64_f64: +; X86: # %bb.0: +; X86-NEXT: pushl %edi +; X86-NEXT: pushl %esi +; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; X86-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; X86-NEXT: vcvttpd2qq %xmm1, %xmm1 +; X86-NEXT: vmovd %xmm1, %esi +; X86-NEXT: xorl %ecx, %ecx +; X86-NEXT: vucomisd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-NEXT: cmovbl %ecx, %esi +; X86-NEXT: vpextrd $1, %xmm1, %eax +; X86-NEXT: movl $-2147483648, %edi # imm = 0x80000000 +; X86-NEXT: cmovael %eax, %edi +; X86-NEXT: vucomisd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF +; X86-NEXT: cmovbel %edi, %edx +; X86-NEXT: movl $-1, %eax +; X86-NEXT: cmovbel %esi, %eax +; X86-NEXT: vucomisd %xmm0, %xmm0 +; X86-NEXT: cmovpl %ecx, %eax +; X86-NEXT: cmovpl %ecx, %edx +; X86-NEXT: popl %esi +; X86-NEXT: popl %edi +; X86-NEXT: retl +; +; X64-LABEL: test_signed_i64_f64: +; X64: # %bb.0: +; X64-NEXT: vcvttsd2sis %xmm0, %rax +; X64-NEXT: retq + %x = call i64 @llvm.fptosi.sat.i64.f64(double %f) + ret i64 %x +} diff --git a/llvm/test/CodeGen/X86/avx10_2satcvtds-intrinsics.ll b/llvm/test/CodeGen/X86/avx10_2satcvtds-intrinsics.ll new file mode 100644 index 0000000..922ac92 --- /dev/null +++ b/llvm/test/CodeGen/X86/avx10_2satcvtds-intrinsics.ll @@ -0,0 +1,1066 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3 +; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown --show-mc-encoding -mattr=+avx10.2-256 | FileCheck %s --check-prefixes=CHECK,X64 +; RUN: llc < %s -verify-machineinstrs -mtriple=i686-unknown-unknown --show-mc-encoding -mattr=+avx10.2-256 | FileCheck %s --check-prefixes=CHECK,X86 + +define i32 @test_x86_avx512_vcvttsd2usis(<2 x double> %a0) { +; CHECK-LABEL: test_x86_avx512_vcvttsd2usis: +; CHECK: # %bb.0: +; CHECK-NEXT: vcvttsd2usis %xmm0, %ecx # encoding: [0x62,0xf5,0x7f,0x08,0x6c,0xc8] +; CHECK-NEXT: vcvttsd2usis {sae}, %xmm0, %eax # encoding: [0x62,0xf5,0x7f,0x18,0x6c,0xc0] +; CHECK-NEXT: addl %ecx, %eax # encoding: [0x01,0xc8] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res0 = call i32 @llvm.x86.avx10.vcvttsd2usis(<2 x double> %a0, i32 4) ; + %res1 = call i32 
@llvm.x86.avx10.vcvttsd2usis(<2 x double> %a0, i32 8) ; + %res2 = add i32 %res0, %res1 + ret i32 %res2 +} +declare i32 @llvm.x86.avx10.vcvttsd2usis(<2 x double>, i32) nounwind readnone + +define i32 @test_x86_avx512_vcvttsd2sis(<2 x double> %a0) { +; CHECK-LABEL: test_x86_avx512_vcvttsd2sis: +; CHECK: # %bb.0: +; CHECK-NEXT: vcvttsd2sis %xmm0, %ecx # encoding: [0x62,0xf5,0x7f,0x08,0x6d,0xc8] +; CHECK-NEXT: vcvttsd2sis {sae}, %xmm0, %eax # encoding: [0x62,0xf5,0x7f,0x18,0x6d,0xc0] +; CHECK-NEXT: addl %ecx, %eax # encoding: [0x01,0xc8] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res0 = call i32 @llvm.x86.avx10.vcvttsd2sis(<2 x double> %a0, i32 4) ; + %res1 = call i32 @llvm.x86.avx10.vcvttsd2sis(<2 x double> %a0, i32 8) ; + %res2 = add i32 %res0, %res1 + ret i32 %res2 +} +declare i32 @llvm.x86.avx10.vcvttsd2sis(<2 x double>, i32) nounwind readnone + +define i32 @test_x86_avx512_vcvttss2sis(<4 x float> %a0) { +; CHECK-LABEL: test_x86_avx512_vcvttss2sis: +; CHECK: # %bb.0: +; CHECK-NEXT: vcvttss2sis {sae}, %xmm0, %ecx # encoding: [0x62,0xf5,0x7e,0x18,0x6d,0xc8] +; CHECK-NEXT: vcvttss2sis %xmm0, %eax # encoding: [0x62,0xf5,0x7e,0x08,0x6d,0xc0] +; CHECK-NEXT: addl %ecx, %eax # encoding: [0x01,0xc8] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res0 = call i32 @llvm.x86.avx10.vcvttss2sis(<4 x float> %a0, i32 8) ; + %res1 = call i32 @llvm.x86.avx10.vcvttss2sis(<4 x float> %a0, i32 4) ; + %res2 = add i32 %res0, %res1 + ret i32 %res2 +} +declare i32 @llvm.x86.avx10.vcvttss2sis(<4 x float>, i32) nounwind readnone + +define i32 @test_x86_avx512_vcvttss2sis_load(ptr %a0) { +; X64-LABEL: test_x86_avx512_vcvttss2sis_load: +; X64: # %bb.0: +; X64-NEXT: vcvttss2sis (%rdi), %eax # encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x07] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_x86_avx512_vcvttss2sis_load: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; X86-NEXT: vcvttss2sis (%eax), %eax # encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x00] +; X86-NEXT: retl # encoding: [0xc3] + %a1 = load <4 x float>, ptr %a0 + %res = call i32 @llvm.x86.avx10.vcvttss2sis(<4 x float> %a1, i32 4) ; + ret i32 %res +} + +define i32 @test_x86_avx512_vcvttss2usis(<4 x float> %a0) { +; CHECK-LABEL: test_x86_avx512_vcvttss2usis: +; CHECK: # %bb.0: +; CHECK-NEXT: vcvttss2usis {sae}, %xmm0, %ecx # encoding: [0x62,0xf5,0x7e,0x18,0x6c,0xc8] +; CHECK-NEXT: vcvttss2usis %xmm0, %eax # encoding: [0x62,0xf5,0x7e,0x08,0x6c,0xc0] +; CHECK-NEXT: addl %ecx, %eax # encoding: [0x01,0xc8] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res0 = call i32 @llvm.x86.avx10.vcvttss2usis(<4 x float> %a0, i32 8) ; + %res1 = call i32 @llvm.x86.avx10.vcvttss2usis(<4 x float> %a0, i32 4) ; + %res2 = add i32 %res0, %res1 + ret i32 %res2 +} +declare i32 @llvm.x86.avx10.vcvttss2usis(<4 x float>, i32) nounwind readnone + +define <4 x i32> @test_int_x86_mask_vcvtt_pd2dqs_256(<4 x double> %x0, <4 x i32> %src, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2dqs_256: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2dqs %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf5,0xfc,0x29,0x6d,0xc8] +; X64-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X64-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2dqs_256: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2dqs %ymm0, %xmm1 {%k1} # 
encoding: [0x62,0xf5,0xfc,0x29,0x6d,0xc8] +; X86-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X86-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttpd2dqs.round.256( <4 x double> %x0, <4 x i32> %src, i8 %mask, i32 4) + ret <4 x i32> %res +} + +define <4 x i32> @test_int_x86_maskz_vcvtt_pd2dqs_256_z(<4 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_pd2dqs_256_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2dqs %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfc,0xa9,0x6d,0xc0] +; X64-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_pd2dqs_256_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2dqs %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfc,0xa9,0x6d,0xc0] +; X86-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttpd2dqs.round.256( <4 x double> %x0, <4 x i32> zeroinitializer, i8 %mask, i32 4) + ret <4 x i32> %res +} + +define <4 x i32> @test_int_x86_mask_vcvtt_pd2dqs_256_undef(<4 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2dqs_256_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2dqs %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfc,0xa9,0x6d,0xc0] +; X64-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2dqs_256_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2dqs %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfc,0xa9,0x6d,0xc0] +; X86-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttpd2dqs.round.256( <4 x double> %x0, <4 x i32> undef, i8 %mask, i32 4) + ret <4 x i32> %res +} + +define <4 x i32> @test_int_x86_mask_vcvtt_pd2dqs_256_default(<4 x double>* %xptr) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2dqs_256_default: +; X64: # %bb.0: +; X64-NEXT: vcvttpd2dqsy (%rdi), %xmm0 # encoding: [0x62,0xf5,0xfc,0x28,0x6d,0x07] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2dqs_256_default: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; X86-NEXT: vcvttpd2dqsy (%eax), %xmm0 # encoding: [0x62,0xf5,0xfc,0x28,0x6d,0x00] +; X86-NEXT: retl # encoding: [0xc3] + %x0 = load <4 x double>, <4 x double> * %xptr + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttpd2dqs.round.256( <4 x double> %x0, <4 x i32> undef, i8 -1, i32 4) + ret <4 x i32> %res +} +declare <4 x i32> @llvm.x86.avx10.mask.vcvttpd2dqs.round.256(<4 x double>, <4 x i32>, i8 , i32) + +define <4 x i32> @test_int_x86_mask_vcvtt_pd2udqs_256(<4 x double> %x0, <4 x i32> %src, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2udqs_256: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2udqs %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf5,0xfc,0x29,0x6c,0xc8] +; X64-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X64-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: 
test_int_x86_mask_vcvtt_pd2udqs_256: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2udqs %ymm0, %xmm1 {%k1} # encoding: [0x62,0xf5,0xfc,0x29,0x6c,0xc8] +; X86-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X86-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttpd2udqs.round.256( <4 x double> %x0, <4 x i32> %src, i8 %mask, i32 4) + ret <4 x i32> %res +} + +define <4 x i32> @test_int_x86_maskz_vcvtt_pd2udqs_256_z(<4 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_pd2udqs_256_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2udqs %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfc,0xa9,0x6c,0xc0] +; X64-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_pd2udqs_256_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2udqs %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfc,0xa9,0x6c,0xc0] +; X86-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttpd2udqs.round.256( <4 x double> %x0, <4 x i32> zeroinitializer, i8 %mask, i32 4) + ret <4 x i32> %res +} + +define <4 x i32> @test_int_x86_mask_vcvtt_pd2udqs_256_undef(<4 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2udqs_256_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2udqs %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfc,0xa9,0x6c,0xc0] +; X64-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2udqs_256_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2udqs %ymm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfc,0xa9,0x6c,0xc0] +; X86-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttpd2udqs.round.256( <4 x double> %x0, <4 x i32> undef, i8 %mask, i32 4) + ret <4 x i32> %res +} + + +define <4 x i32> @test_int_x86_mask_vcvtt_pd2udqs_256_default(<4 x double>* %x0) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2udqs_256_default: +; X64: # %bb.0: +; X64-NEXT: vcvttpd2udqsy (%rdi), %xmm0 # encoding: [0x62,0xf5,0xfc,0x28,0x6c,0x07] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2udqs_256_default: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; X86-NEXT: vcvttpd2udqsy (%eax), %xmm0 # encoding: [0x62,0xf5,0xfc,0x28,0x6c,0x00] +; X86-NEXT: retl # encoding: [0xc3] + %x10 = load <4 x double>, <4 x double> * %x0 + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttpd2udqs.round.256( <4 x double> %x10, <4 x i32> undef, i8 -1, i32 4) + ret <4 x i32> %res +} +declare <4 x i32> @llvm.x86.avx10.mask.vcvttpd2udqs.round.256(<4 x double>, <4 x i32>, i8 , i32) + +define <4 x i64> @test_int_x86_mask_vcvtt_pd2qqs_256(<4 x double> %x0, <4 x i64> %src, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2qqs_256: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2qqs %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf5,0xfd,0x29,0x6d,0xc8] +; X64-NEXT: vmovaps 
%ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2qqs_256: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2qqs %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf5,0xfd,0x29,0x6d,0xc8] +; X86-NEXT: vmovaps %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i64> @llvm.x86.avx10.mask.vcvttpd2qqs.round.256( <4 x double> %x0, <4 x i64> %src, i8 %mask, i32 4) + ret <4 x i64> %res +} + +define <4 x i64> @test_int_x86_maskz_vcvtt_pd2qqs_256_z(<4 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_pd2qqs_256_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2qqs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0xa9,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_pd2qqs_256_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2qqs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0xa9,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i64> @llvm.x86.avx10.mask.vcvttpd2qqs.round.256( <4 x double> %x0, <4 x i64> zeroinitializer, i8 %mask, i32 4) + ret <4 x i64> %res +} + +define <4 x i64> @test_int_x86_mask_vcvtt_pd2qqs_256_undef(<4 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2qqs_256_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2qqs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0xa9,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2qqs_256_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2qqs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0xa9,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i64> @llvm.x86.avx10.mask.vcvttpd2qqs.round.256( <4 x double> %x0, <4 x i64> undef, i8 %mask, i32 4) + ret <4 x i64> %res +} + + +define <4 x i64> @test_int_x86_mask_vcvtt_pd2qqs_256_default(<4 x double>* %x0) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2qqs_256_default: +; X64: # %bb.0: +; X64-NEXT: vcvttpd2qqs (%rdi), %ymm0 # encoding: [0x62,0xf5,0xfd,0x28,0x6d,0x07] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2qqs_256_default: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; X86-NEXT: vcvttpd2qqs (%eax), %ymm0 # encoding: [0x62,0xf5,0xfd,0x28,0x6d,0x00] +; X86-NEXT: retl # encoding: [0xc3] + %x10 = load <4 x double>, <4 x double>* %x0 + %res = call <4 x i64> @llvm.x86.avx10.mask.vcvttpd2qqs.round.256( <4 x double> %x10, <4 x i64> undef, i8 -1, i32 4) + ret <4 x i64> %res +} +declare <4 x i64> @llvm.x86.avx10.mask.vcvttpd2qqs.round.256(<4 x double>, <4 x i64>, i8 , i32) + +define <4 x i64> @test_int_x86_mask_vcvtt_pd2uqqs_256(<4 x double> %x0, <4 x i64> %src, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2uqqs_256: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2uqqs %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf5,0xfd,0x29,0x6c,0xc8] +; X64-NEXT: vmovaps %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2uqqs_256: +; 
X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2uqqs %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf5,0xfd,0x29,0x6c,0xc8] +; X86-NEXT: vmovaps %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i64> @llvm.x86.avx10.mask.vcvttpd2uqqs.round.256( <4 x double> %x0, <4 x i64> %src, i8 %mask, i32 4) + ret <4 x i64> %res +} + +define <4 x i64> @test_int_x86_maskz_vcvtt_pd2uqqs_256_z(<4 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_pd2uqqs_256_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2uqqs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0xa9,0x6c,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_pd2uqqs_256_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2uqqs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0xa9,0x6c,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i64> @llvm.x86.avx10.mask.vcvttpd2uqqs.round.256( <4 x double> %x0, <4 x i64> zeroinitializer, i8 %mask, i32 4) + ret <4 x i64> %res +} + +define <4 x i64> @test_int_x86_mask_vcvtt_pd2uqqs_256_undef(<4 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2uqqs_256_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2uqqs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0xa9,0x6c,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2uqqs_256_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2uqqs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0xa9,0x6c,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i64> @llvm.x86.avx10.mask.vcvttpd2uqqs.round.256( <4 x double> %x0, <4 x i64> undef, i8 %mask, i32 4) + ret <4 x i64> %res +} + + +define <4 x i64> @test_int_x86_mask_vcvtt_pd2uqqs_256_default(<4 x double>* %x0) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2uqqs_256_default: +; X64: # %bb.0: +; X64-NEXT: vcvttpd2uqqs (%rdi), %ymm0 # encoding: [0x62,0xf5,0xfd,0x28,0x6c,0x07] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2uqqs_256_default: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; X86-NEXT: vcvttpd2uqqs (%eax), %ymm0 # encoding: [0x62,0xf5,0xfd,0x28,0x6c,0x00] +; X86-NEXT: retl # encoding: [0xc3] + %x10 = load <4 x double>, <4 x double>* %x0 + %res = call <4 x i64> @llvm.x86.avx10.mask.vcvttpd2uqqs.round.256( <4 x double> %x10, <4 x i64> undef, i8 -1, i32 4) + ret <4 x i64> %res +} +declare <4 x i64> @llvm.x86.avx10.mask.vcvttpd2uqqs.round.256(<4 x double>, <4 x i64>, i8 , i32) + +define <8 x i32> @test_int_x86_mask_vcvtt_ps2dqs_256(<8 x float> %x0, <8 x i32> %src, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2dqs_256: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2dqs %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf5,0x7c,0x29,0x6d,0xc8] +; X64-NEXT: vmovaps %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2dqs_256: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2dqs %ymm0, %ymm1 {%k1} # 
encoding: [0x62,0xf5,0x7c,0x29,0x6d,0xc8] +; X86-NEXT: vmovaps %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx10.mask.vcvttps2dqs.round.256( <8 x float> %x0, <8 x i32> %src, i8 %mask, i32 4) + ret <8 x i32> %res +} + +define <8 x i32> @test_int_x86_maskz_vcvtt_ps2dqs_256_z(<8 x float> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_ps2dqs_256_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2dqs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xa9,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_ps2dqs_256_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2dqs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xa9,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx10.mask.vcvttps2dqs.round.256( <8 x float> %x0, <8 x i32> zeroinitializer, i8 %mask, i32 4) + ret <8 x i32> %res +} + +define <8 x i32> @test_int_x86_mask_vcvtt_ps2dqs_256_undef(<8 x float> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2dqs_256_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2dqs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xa9,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2dqs_256_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2dqs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xa9,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx10.mask.vcvttps2dqs.round.256( <8 x float> %x0, <8 x i32> undef, i8 %mask, i32 4) + ret <8 x i32> %res +} + +define <8 x i32> @test_int_x86_mask_vcvtt_ps2dqs_256_default(<8 x float>* %x0) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2dqs_256_default: +; X64: # %bb.0: +; X64-NEXT: vcvttps2dqs (%rdi), %ymm0 # encoding: [0x62,0xf5,0x7c,0x28,0x6d,0x07] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2dqs_256_default: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; X86-NEXT: vcvttps2dqs (%eax), %ymm0 # encoding: [0x62,0xf5,0x7c,0x28,0x6d,0x00] +; X86-NEXT: retl # encoding: [0xc3] + %x10 = load <8 x float>, <8 x float>* %x0 + %res = call <8 x i32> @llvm.x86.avx10.mask.vcvttps2dqs.round.256( <8 x float> %x10, <8 x i32> undef, i8 -1, i32 4) + ret <8 x i32> %res +} +declare <8 x i32> @llvm.x86.avx10.mask.vcvttps2dqs.round.256(<8 x float>, <8 x i32>, i8 , i32) + +define <8 x i32> @test_int_x86_mask_vcvtt_ps2udqs_256(<8 x float> %x0, <8 x i32> %src, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2udqs_256: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2udqs %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf5,0x7c,0x29,0x6c,0xc8] +; X64-NEXT: vmovaps %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2udqs_256: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2udqs %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf5,0x7c,0x29,0x6c,0xc8] +; X86-NEXT: vmovaps %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1] +; X86-NEXT: retl # encoding: [0xc3] + 
%res = call <8 x i32> @llvm.x86.avx10.mask.vcvttps2udqs.round.256( <8 x float> %x0, <8 x i32> %src, i8 %mask, i32 4) + ret <8 x i32> %res +} + +define <8 x i32> @test_int_x86_maskz_vcvtt_ps2udqs_256_z(<8 x float> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_ps2udqs_256_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2udqs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xa9,0x6c,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_ps2udqs_256_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2udqs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xa9,0x6c,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx10.mask.vcvttps2udqs.round.256( <8 x float> %x0, <8 x i32> zeroinitializer, i8 %mask, i32 4) + ret <8 x i32> %res +} + +define <8 x i32> @test_int_x86_mask_vcvtt_ps2udqs_256_undef(<8 x float> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2udqs_256_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2udqs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xa9,0x6c,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2udqs_256_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2udqs %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0xa9,0x6c,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <8 x i32> @llvm.x86.avx10.mask.vcvttps2udqs.round.256( <8 x float> %x0, <8 x i32> undef, i8 %mask, i32 4) + ret <8 x i32> %res +} + + +define <8 x i32> @test_int_x86_mask_vcvtt_ps2udqs_256_default(<8 x float>* %x0) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2udqs_256_default: +; X64: # %bb.0: +; X64-NEXT: vcvttps2udqs (%rdi), %ymm0 # encoding: [0x62,0xf5,0x7c,0x28,0x6c,0x07] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2udqs_256_default: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; X86-NEXT: vcvttps2udqs (%eax), %ymm0 # encoding: [0x62,0xf5,0x7c,0x28,0x6c,0x00] +; X86-NEXT: retl # encoding: [0xc3] + %x10 = load <8 x float>, <8 x float>* %x0 + %res = call <8 x i32> @llvm.x86.avx10.mask.vcvttps2udqs.round.256( <8 x float> %x10, <8 x i32> undef, i8 -1, i32 4) + ret <8 x i32> %res +} +declare <8 x i32> @llvm.x86.avx10.mask.vcvttps2udqs.round.256(<8 x float>, <8 x i32>, i8 , i32) + +define <4 x i64> @test_int_x86_maskz_vcvtt_ps2qqs_256_z(<4 x float> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_ps2qqs_256_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2qqs %xmm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xa9,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_ps2qqs_256_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2qqs %xmm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xa9,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i64> @llvm.x86.avx10.mask.vcvttps2qqs.round.256( <4 x float> %x0, <4 x i64> zeroinitializer, i8 %mask, i32 4) + ret <4 x i64> %res +} + +define <4 x i64> @test_int_x86_mask_vcvtt_ps2qqs_256_undef(<4 x float> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2qqs_256_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, 
%k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2qqs %xmm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xa9,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2qqs_256_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2qqs %xmm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xa9,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i64> @llvm.x86.avx10.mask.vcvttps2qqs.round.256( <4 x float> %x0, <4 x i64> undef, i8 %mask, i32 4) + ret <4 x i64> %res +} +declare <4 x i64> @llvm.x86.avx10.mask.vcvttps2qqs.round.256(<4 x float>, <4 x i64>, i8 , i32) + +define <4 x i64> @test_int_x86_mask_vcvtt_ps2uqqs_256(<4 x float> %x0, <4 x i64> %src, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2uqqs_256: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2uqqs %xmm0, %ymm1 {%k1} # encoding: [0x62,0xf5,0x7d,0x29,0x6c,0xc8] +; X64-NEXT: vmovaps %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2uqqs_256: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2uqqs %xmm0, %ymm1 {%k1} # encoding: [0x62,0xf5,0x7d,0x29,0x6c,0xc8] +; X86-NEXT: vmovaps %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i64> @llvm.x86.avx10.mask.vcvttps2uqqs.round.256( <4 x float> %x0, <4 x i64> %src, i8 %mask, i32 4) + ret <4 x i64> %res +} + +define <4 x i64> @test_int_x86_maskz_vcvtt_ps2uqqs_256_z(<4 x float> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_ps2uqqs_256_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2uqqs %xmm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xa9,0x6c,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_ps2uqqs_256_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2uqqs %xmm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xa9,0x6c,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i64> @llvm.x86.avx10.mask.vcvttps2uqqs.round.256( <4 x float> %x0, <4 x i64> zeroinitializer, i8 %mask, i32 4) + ret <4 x i64> %res +} + +define <4 x i64> @test_int_x86_mask_vcvtt_ps2uqqs_256_undef(<4 x float> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2uqqs_256_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2uqqs %xmm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xa9,0x6c,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2uqqs_256_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2uqqs %xmm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0xa9,0x6c,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i64> @llvm.x86.avx10.mask.vcvttps2uqqs.round.256( <4 x float> %x0, <4 x i64> undef, i8 %mask, i32 4) + ret <4 x i64> %res +} + + +define <4 x i64> @test_int_x86_mask_vcvtt_ps2uqqs_256_default(<4 x float> %x0) { +; CHECK-LABEL: test_int_x86_mask_vcvtt_ps2uqqs_256_default: +; CHECK: # %bb.0: +; CHECK-NEXT: vcvttps2uqqs %xmm0, %ymm0 # encoding: [0x62,0xf5,0x7d,0x28,0x6c,0xc0] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + 
%res = call <4 x i64> @llvm.x86.avx10.mask.vcvttps2uqqs.round.256( <4 x float> %x0, <4 x i64> undef, i8 -1, i32 4) + ret <4 x i64> %res +} + +declare <4 x i64> @llvm.x86.avx10.mask.vcvttps2uqqs.round.256(<4 x float>, <4 x i64>, i8 , i32) + +define <4 x i32> @test_int_x86_mask_vcvtt_pd2dqs_128(<2 x double> %x0, <4 x i32> %src, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2dqs_128: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2dqs %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf5,0xfc,0x09,0x6d,0xc8] +; X64-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2dqs_128: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2dqs %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf5,0xfc,0x09,0x6d,0xc8] +; X86-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttpd2dqs.128( <2 x double> %x0, <4 x i32> %src, i8 %mask) + ret <4 x i32> %res +} + +define <4 x i32> @test_int_x86_maskz_vcvtt_pd2dqs_128_z(<2 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_pd2dqs_128_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2dqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfc,0x89,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_pd2dqs_128_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2dqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfc,0x89,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttpd2dqs.128( <2 x double> %x0, <4 x i32> zeroinitializer, i8 %mask) + ret <4 x i32> %res +} + +define <4 x i32> @test_int_x86_mask_vcvtt_pd2dqs_128_undef(<2 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2dqs_128_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2dqs %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf5,0xfc,0x09,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2dqs_128_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2dqs %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf5,0xfc,0x09,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttpd2dqs.128( <2 x double> %x0, <4 x i32> undef, i8 %mask) + ret <4 x i32> %res +} + + +define <4 x i32> @test_int_x86_mask_vcvtt_pd2dqs_128_default(<2 x double> %x0) { +; CHECK-LABEL: test_int_x86_mask_vcvtt_pd2dqs_128_default: +; CHECK: # %bb.0: +; CHECK-NEXT: vcvttpd2dqs %xmm0, %xmm0 # encoding: [0x62,0xf5,0xfc,0x08,0x6d,0xc0] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttpd2dqs.128( <2 x double> %x0, <4 x i32> undef, i8 -1) + ret <4 x i32> %res +} +declare <4 x i32> @llvm.x86.avx10.mask.vcvttpd2dqs.128(<2 x double>, <4 x i32>, i8) + +define <4 x i32> @test_int_x86_mask_vcvtt_pd2udqs_128(<2 x double> %x0, <4 x i32> %src, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2udqs_128: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2dqs %xmm0, %xmm1 {%k1} # encoding: 
[0x62,0xf5,0xfc,0x09,0x6d,0xc8] +; X64-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2udqs_128: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2dqs %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf5,0xfc,0x09,0x6d,0xc8] +; X86-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttpd2udqs.128( <2 x double> %x0, <4 x i32> %src, i8 %mask) + ret <4 x i32> %res +} + +define <4 x i32> @test_int_x86_maskz_vcvtt_pd2udqs_128_z(<2 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_pd2udqs_128_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2dqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfc,0x89,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_pd2udqs_128_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2dqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfc,0x89,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttpd2udqs.128( <2 x double> %x0, <4 x i32> zeroinitializer, i8 %mask) + ret <4 x i32> %res +} + +define <4 x i32> @test_int_x86_mask_vcvtt_pd2udqs_128_undef(<2 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2udqs_128_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2dqs %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf5,0xfc,0x09,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2udqs_128_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2dqs %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf5,0xfc,0x09,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttpd2udqs.128( <2 x double> %x0, <4 x i32> undef, i8 %mask) + ret <4 x i32> %res +} + + +define <4 x i32> @test_int_x86_mask_vcvtt_pd2udqs_128_default(<2 x double> %x0) { +; CHECK-LABEL: test_int_x86_mask_vcvtt_pd2udqs_128_default: +; CHECK: # %bb.0: +; CHECK-NEXT: vcvttpd2udqs %xmm0, %xmm0 # encoding: [0x62,0xf5,0xfc,0x08,0x6c,0xc0] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttpd2udqs.128( <2 x double> %x0, <4 x i32> undef, i8 -1) + ret <4 x i32> %res +} +declare <4 x i32> @llvm.x86.avx10.mask.vcvttpd2udqs.128(<2 x double>, <4 x i32>, i8) + +define <2 x i64> @test_int_x86_mask_vcvtt_pd2qqs_128(<2 x double> %x0, <2 x i64> %src, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2qqs_128: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2qqs %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf5,0xfd,0x09,0x6d,0xc8] +; X64-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2qqs_128: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2qqs %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf5,0xfd,0x09,0x6d,0xc8] +; X86-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X86-NEXT: retl # encoding: 
[0xc3] + %res = call <2 x i64> @llvm.x86.avx10.mask.vcvttpd2qqs.128( <2 x double> %x0, <2 x i64> %src, i8 %mask) + ret <2 x i64> %res +} + +define <2 x i64> @test_int_x86_maskz_vcvtt_pd2qqs_128_z(<2 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_pd2qqs_128_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2qqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0x89,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_pd2qqs_128_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2qqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0x89,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <2 x i64> @llvm.x86.avx10.mask.vcvttpd2qqs.128( <2 x double> %x0, <2 x i64> zeroinitializer, i8 %mask) + ret <2 x i64> %res +} + +define <2 x i64> @test_int_x86_mask_vcvtt_pd2qqs_128_undef(<2 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2qqs_128_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2qqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0x89,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2qqs_128_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2qqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0x89,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <2 x i64> @llvm.x86.avx10.mask.vcvttpd2qqs.128( <2 x double> %x0, <2 x i64> undef, i8 %mask) + ret <2 x i64> %res +} + + +define <2 x i64> @test_int_x86_mask_vcvtt_pd2qqs_128_default(<2 x double> %x0) { +; CHECK-LABEL: test_int_x86_mask_vcvtt_pd2qqs_128_default: +; CHECK: # %bb.0: +; CHECK-NEXT: vcvttpd2qqs %xmm0, %xmm0 # encoding: [0x62,0xf5,0xfd,0x08,0x6d,0xc0] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res = call <2 x i64> @llvm.x86.avx10.mask.vcvttpd2qqs.128( <2 x double> %x0, <2 x i64> undef, i8 -1) + ret <2 x i64> %res +} +declare <2 x i64> @llvm.x86.avx10.mask.vcvttpd2qqs.128(<2 x double>, <2 x i64>, i8) + +define <2 x i64> @test_int_x86_mask_vcvtt_pd2uqqs_128(<2 x double> %x0, <2 x i64> %src, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2uqqs_128: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2uqqs %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf5,0xfd,0x09,0x6c,0xc8] +; X64-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2uqqs_128: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2uqqs %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf5,0xfd,0x09,0x6c,0xc8] +; X86-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <2 x i64> @llvm.x86.avx10.mask.vcvttpd2uqqs.128( <2 x double> %x0, <2 x i64> %src, i8 %mask) + ret <2 x i64> %res +} + +define <2 x i64> @test_int_x86_maskz_vcvtt_pd2uqqs_128_z(<2 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_pd2uqqs_128_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2uqqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0x89,0x6c,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: 
test_int_x86_maskz_vcvtt_pd2uqqs_128_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2uqqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0x89,0x6c,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <2 x i64> @llvm.x86.avx10.mask.vcvttpd2uqqs.128( <2 x double> %x0, <2 x i64> zeroinitializer, i8 %mask) + ret <2 x i64> %res +} + +define <2 x i64> @test_int_x86_mask_vcvtt_pd2uqqs_128_undef(<2 x double> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_pd2uqqs_128_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttpd2uqqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0x89,0x6c,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_pd2uqqs_128_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttpd2uqqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0xfd,0x89,0x6c,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <2 x i64> @llvm.x86.avx10.mask.vcvttpd2uqqs.128( <2 x double> %x0, <2 x i64> undef, i8 %mask) + ret <2 x i64> %res +} + + +define <2 x i64> @test_int_x86_mask_vcvtt_pd2uqqs_128_default(<2 x double> %x0) { +; CHECK-LABEL: test_int_x86_mask_vcvtt_pd2uqqs_128_default: +; CHECK: # %bb.0: +; CHECK-NEXT: vcvttpd2uqqs %xmm0, %xmm0 # encoding: [0x62,0xf5,0xfd,0x08,0x6c,0xc0] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res = call <2 x i64> @llvm.x86.avx10.mask.vcvttpd2uqqs.128( <2 x double> %x0, <2 x i64> undef, i8 -1) + ret <2 x i64> %res +} +declare <2 x i64> @llvm.x86.avx10.mask.vcvttpd2uqqs.128(<2 x double>, <2 x i64>, i8) + +define <2 x i64> @test_int_x86_mask_vcvtt_ps2qqs_128_default(<4 x float> %x0) { +; CHECK-LABEL: test_int_x86_mask_vcvtt_ps2qqs_128_default: +; CHECK: # %bb.0: +; CHECK-NEXT: vcvttps2qqs %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7d,0x08,0x6d,0xc0] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res = call <2 x i64> @llvm.x86.avx10.mask.vcvttps2qqs.128( <4 x float> %x0, <2 x i64> undef, i8 -1) + ret <2 x i64> %res +} + +define <4 x i32> @test_int_x86_mask_vcvtt_ps2dqs_128(<4 x float> %x0, <4 x i32> %src, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2dqs_128: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2dqs %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf5,0x7c,0x09,0x6d,0xc8] +; X64-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2dqs_128: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2dqs %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf5,0x7c,0x09,0x6d,0xc8] +; X86-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttps2dqs.128( <4 x float> %x0, <4 x i32> %src, i8 %mask) + ret <4 x i32> %res +} +define <4 x i32> @test_int_x86_maskz_vcvtt_ps2dqs_128_z(<4 x float> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_ps2dqs_128_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2dqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x89,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_ps2dqs_128_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: 
[0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2dqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x89,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttps2dqs.128( <4 x float> %x0, <4 x i32> zeroinitializer, i8 %mask) + ret <4 x i32> %res +} +define <4 x i32> @test_int_x86_mask_vcvtt_ps2dqs_128_undef(<4 x float> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2dqs_128_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2dqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x89,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2dqs_128_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2dqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x89,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttps2dqs.128( <4 x float> %x0, <4 x i32> undef, i8 %mask) + ret <4 x i32> %res +} +define <4 x i32> @test_int_x86_mask_vcvtt_ps2dqs_128_default(<4 x float> %x0) { +; CHECK-LABEL: test_int_x86_mask_vcvtt_ps2dqs_128_default: +; CHECK: # %bb.0: +; CHECK-NEXT: vcvttps2dqs %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7c,0x08,0x6d,0xc0] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttps2dqs.128( <4 x float> %x0, <4 x i32> undef, i8 -1) + ret <4 x i32> %res +} +declare <4 x i32> @llvm.x86.avx10.mask.vcvttps2dqs.128(<4 x float>, <4 x i32>, i8) + +define <4 x i32> @test_int_x86_mask_vcvtt_ps2udqs_128(<4 x float> %x0, <4 x i32> %src, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2udqs_128: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2udqs %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf5,0x7c,0x09,0x6c,0xc8] +; X64-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2udqs_128: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2udqs %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf5,0x7c,0x09,0x6c,0xc8] +; X86-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttps2udqs.128( <4 x float> %x0, <4 x i32> %src, i8 %mask) + ret <4 x i32> %res +} +define <4 x i32> @test_int_x86_maskz_vcvtt_ps2udqs_128_z(<4 x float> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_ps2udqs_128_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2udqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x89,0x6c,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_ps2udqs_128_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2udqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x89,0x6c,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttps2udqs.128( <4 x float> %x0, <4 x i32> zeroinitializer, i8 %mask) + ret <4 x i32> %res +} +define <4 x i32> @test_int_x86_mask_vcvtt_ps2udqs_128_undef(<4 x float> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2udqs_128_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: 
vcvttps2udqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x89,0x6c,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2udqs_128_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2udqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7c,0x89,0x6c,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttps2udqs.128( <4 x float> %x0, <4 x i32> undef, i8 %mask) + ret <4 x i32> %res +} +define <4 x i32> @test_int_x86_mask_vcvtt_ps2udqs_128_default(<4 x float> %x0) { +; CHECK-LABEL: test_int_x86_mask_vcvtt_ps2udqs_128_default: +; CHECK: # %bb.0: +; CHECK-NEXT: vcvttps2udqs %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7c,0x08,0x6c,0xc0] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res = call <4 x i32> @llvm.x86.avx10.mask.vcvttps2udqs.128( <4 x float> %x0, <4 x i32> undef, i8 -1) + ret <4 x i32> %res +} +declare <4 x i32> @llvm.x86.avx10.mask.vcvttps2udqs.128(<4 x float>, <4 x i32>, i8) + +define <2 x i64> @test_int_x86_mask_vcvtt_ps2qqs_128_undef(<4 x float> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2qqs_128_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2qqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x89,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2qqs_128_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2qqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x89,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <2 x i64> @llvm.x86.avx10.mask.vcvttps2qqs.128( <4 x float> %x0, <2 x i64> undef, i8 %mask) + ret <2 x i64> %res +} + +define <2 x i64> @test_int_x86_maskz_vcvtt_ps2qqs_128_z(<4 x float> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_ps2qqs_128_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2qqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x89,0x6d,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_ps2qqs_128_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2qqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x89,0x6d,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <2 x i64> @llvm.x86.avx10.mask.vcvttps2qqs.128( <4 x float> %x0, <2 x i64> zeroinitializer, i8 %mask) + ret <2 x i64> %res +} + +define <2 x i64> @test_int_x86_mask_vcvtt_ps2qqs_128(<4 x float> %x0, <2 x i64> %src, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2qqs_128: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2qqs %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf5,0x7d,0x09,0x6d,0xc8] +; X64-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2qqs_128: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2qqs %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf5,0x7d,0x09,0x6d,0xc8] +; X86-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <2 x i64> @llvm.x86.avx10.mask.vcvttps2qqs.128( <4 x float> %x0, <2 x i64> %src, i8 %mask) + ret <2 x i64> %res +} 
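+; A note on the conventions these autogenerated tests exercise (summary
+; comment; the CHECK lines themselves come from update_llc_test_checks.py):
+; an i8 -1 mask selects the unmasked form, a %src passthru exercises
+; merge-masking ({%k1}), zeroinitializer exercises zero-masking ({%k1} {z}),
+; and an undef passthru leaves the backend free to pick either form, as the
+; checks above show. In the scalar and .round.256 variants, the trailing i32
+; argument is the SAE control: 4 keeps the default form, 8 selects {sae}.
+; Semantically, a merge-masked call such as the one just above behaves like
+; a select over the plain conversion; a sketch in plain IR (illustrative
+; value names, assuming the usual select-based masking semantics):
+;   %cvt  = call <2 x i64> @llvm.x86.avx10.mask.vcvttps2qqs.128(<4 x float> %x0, <2 x i64> undef, i8 -1)
+;   %bits = bitcast i8 %mask to <8 x i1>
+;   %m    = shufflevector <8 x i1> %bits, <8 x i1> poison, <2 x i32> <i32 0, i32 1>
+;   %res  = select <2 x i1> %m, <2 x i64> %cvt, <2 x i64> %src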
+declare <2 x i64> @llvm.x86.avx10.mask.vcvttps2qqs.128(<4 x float>, <2 x i64>, i8) + +define <2 x i64> @test_int_x86_mask_vcvtt_ps2uqqs_128(<4 x float> %x0, <2 x i64> %src, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2uqqs_128: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2uqqs %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf5,0x7d,0x09,0x6c,0xc8] +; X64-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2uqqs_128: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2uqqs %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf5,0x7d,0x09,0x6c,0xc8] +; X86-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <2 x i64> @llvm.x86.avx10.mask.vcvttps2uqqs.128( <4 x float> %x0, <2 x i64> %src, i8 %mask) + ret <2 x i64> %res +} + +define <2 x i64> @test_int_x86_mask_vcvtt_ps2uqqs_128_undef(<4 x float> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_mask_vcvtt_ps2uqqs_128_undef: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2uqqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x89,0x6c,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_mask_vcvtt_ps2uqqs_128_undef: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2uqqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x89,0x6c,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <2 x i64> @llvm.x86.avx10.mask.vcvttps2uqqs.128( <4 x float> %x0, <2 x i64> undef, i8 %mask) + ret <2 x i64> %res +} + +define <2 x i64> @test_int_x86_mask_vcvtt_ps2uqqs_128_default(<4 x float> %x0) { +; CHECK-LABEL: test_int_x86_mask_vcvtt_ps2uqqs_128_default: +; CHECK: # %bb.0: +; CHECK-NEXT: vcvttps2uqqs %xmm0, %xmm0 # encoding: [0x62,0xf5,0x7d,0x08,0x6c,0xc0] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res = call <2 x i64> @llvm.x86.avx10.mask.vcvttps2uqqs.128( <4 x float> %x0, <2 x i64> undef, i8 -1) + ret <2 x i64> %res +} +define <2 x i64> @test_int_x86_maskz_vcvtt_ps2uqqs_128_z(<4 x float> %x0, i8 %mask) { +; X64-LABEL: test_int_x86_maskz_vcvtt_ps2uqqs_128_z: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vcvttps2uqqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x89,0x6c,0xc0] +; X64-NEXT: retq # encoding: [0xc3] +; +; X86-LABEL: test_int_x86_maskz_vcvtt_ps2uqqs_128_z: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vcvttps2uqqs %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf5,0x7d,0x89,0x6c,0xc0] +; X86-NEXT: retl # encoding: [0xc3] + %res = call <2 x i64> @llvm.x86.avx10.mask.vcvttps2uqqs.128( <4 x float> %x0, <2 x i64> zeroinitializer, i8 %mask) + ret <2 x i64> %res +} +declare <2 x i64> @llvm.x86.avx10.mask.vcvttps2uqqs.128(<4 x float>, <2 x i64>, i8) + diff --git a/llvm/test/CodeGen/X86/avx10_2satcvtds-x64-intrinsics.ll b/llvm/test/CodeGen/X86/avx10_2satcvtds-x64-intrinsics.ll new file mode 100644 index 0000000..f5be929 --- /dev/null +++ b/llvm/test/CodeGen/X86/avx10_2satcvtds-x64-intrinsics.ll @@ -0,0 +1,58 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3 +; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown 
--show-mc-encoding -mattr=+avx10.2-256 | FileCheck %s + +define i64 @test_x86_avx512_vcvttsd2si64(<2 x double> %a0) { +; CHECK-LABEL: test_x86_avx512_vcvttsd2si64: +; CHECK: # %bb.0: +; CHECK-NEXT: vcvttsd2sis %xmm0, %rcx # encoding: [0x62,0xf5,0xff,0x08,0x6d,0xc8] +; CHECK-NEXT: vcvttsd2sis {sae}, %xmm0, %rax # encoding: [0x62,0xf5,0xff,0x18,0x6d,0xc0] +; CHECK-NEXT: addq %rcx, %rax # encoding: [0x48,0x01,0xc8] +; CHECK-NEXT: retq # encoding: [0xc3] + %res0 = call i64 @llvm.x86.avx10.vcvttsd2sis64(<2 x double> %a0, i32 4) ; + %res1 = call i64 @llvm.x86.avx10.vcvttsd2sis64(<2 x double> %a0, i32 8) ; + %res2 = add i64 %res0, %res1 + ret i64 %res2 +} +declare i64 @llvm.x86.avx10.vcvttsd2sis64(<2 x double>, i32) nounwind readnone + +define i64 @test_x86_avx512_vcvttsd2usi64(<2 x double> %a0) { +; CHECK-LABEL: test_x86_avx512_vcvttsd2usi64: +; CHECK: # %bb.0: +; CHECK-NEXT: vcvttsd2usis %xmm0, %rcx # encoding: [0x62,0xf5,0xff,0x08,0x6c,0xc8] +; CHECK-NEXT: vcvttsd2usis {sae}, %xmm0, %rax # encoding: [0x62,0xf5,0xff,0x18,0x6c,0xc0] +; CHECK-NEXT: addq %rcx, %rax # encoding: [0x48,0x01,0xc8] +; CHECK-NEXT: retq # encoding: [0xc3] + %res0 = call i64 @llvm.x86.avx10.vcvttsd2usis64(<2 x double> %a0, i32 4) ; + %res1 = call i64 @llvm.x86.avx10.vcvttsd2usis64(<2 x double> %a0, i32 8) ; + %res2 = add i64 %res0, %res1 + ret i64 %res2 +} +declare i64 @llvm.x86.avx10.vcvttsd2usis64(<2 x double>, i32) nounwind readnone + +define i64 @test_x86_avx512_vcvttss2sis64(<4 x float> %a0) { +; CHECK-LABEL: test_x86_avx512_vcvttss2sis64: +; CHECK: # %bb.0: +; CHECK-NEXT: vcvttss2sis %xmm0, %rcx # encoding: [0x62,0xf5,0xfe,0x08,0x6d,0xc8] +; CHECK-NEXT: vcvttss2sis {sae}, %xmm0, %rax # encoding: [0x62,0xf5,0xfe,0x18,0x6d,0xc0] +; CHECK-NEXT: addq %rcx, %rax # encoding: [0x48,0x01,0xc8] +; CHECK-NEXT: retq # encoding: [0xc3] + %res0 = call i64 @llvm.x86.avx10.vcvttss2sis64(<4 x float> %a0, i32 4) ; + %res1 = call i64 @llvm.x86.avx10.vcvttss2sis64(<4 x float> %a0, i32 8) ; + %res2 = add i64 %res0, %res1 + ret i64 %res2 +} +declare i64 @llvm.x86.avx10.vcvttss2sis64(<4 x float>, i32) nounwind readnone + +define i64 @test_x86_avx512_vcvttss2usis64(<4 x float> %a0) { +; CHECK-LABEL: test_x86_avx512_vcvttss2usis64: +; CHECK: # %bb.0: +; CHECK-NEXT: vcvttss2usis %xmm0, %rcx # encoding: [0x62,0xf5,0xfe,0x08,0x6c,0xc8] +; CHECK-NEXT: vcvttss2usis {sae}, %xmm0, %rax # encoding: [0x62,0xf5,0xfe,0x18,0x6c,0xc0] +; CHECK-NEXT: addq %rcx, %rax # encoding: [0x48,0x01,0xc8] +; CHECK-NEXT: retq # encoding: [0xc3] + %res0 = call i64 @llvm.x86.avx10.vcvttss2usis64(<4 x float> %a0, i32 4) ; + %res1 = call i64 @llvm.x86.avx10.vcvttss2usis64(<4 x float> %a0, i32 8) ; + %res2 = add i64 %res0, %res1 + ret i64 %res2 +} +declare i64 @llvm.x86.avx10.vcvttss2usis64(<4 x float>, i32) nounwind readnone diff --git a/llvm/test/MC/Disassembler/X86/avx10.2-satcvtds-32.txt b/llvm/test/MC/Disassembler/X86/avx10.2-satcvtds-32.txt new file mode 100644 index 0000000..b2b8267 --- /dev/null +++ b/llvm/test/MC/Disassembler/X86/avx10.2-satcvtds-32.txt @@ -0,0 +1,1043 @@ +# RUN: llvm-mc --disassemble %s -triple=i386 | FileCheck %s --check-prefixes=ATT +# RUN: llvm-mc --disassemble %s -triple=i386 -x86-asm-syntax=intel --output-asm-variant=1 | FileCheck %s --check-prefixes=INTEL + +# ATT: vcvttpd2dqs %xmm3, %xmm2 +# INTEL: vcvttpd2dqs xmm2, xmm3 +0x62,0xf5,0xfc,0x08,0x6d,0xd3 + +# ATT: vcvttpd2dqs %xmm3, %xmm2 {%k7} +# INTEL: vcvttpd2dqs xmm2 {k7}, xmm3 +0x62,0xf5,0xfc,0x0f,0x6d,0xd3 + +# ATT: vcvttpd2dqs %xmm3, %xmm2 {%k7} {z} +# INTEL: vcvttpd2dqs xmm2 {k7} 
{z}, xmm3 +0x62,0xf5,0xfc,0x8f,0x6d,0xd3 + +# ATT: vcvttpd2dqs %ymm3, %xmm2 +# INTEL: vcvttpd2dqs xmm2, ymm3 +0x62,0xf5,0xfc,0x28,0x6d,0xd3 + +# ATT: vcvttpd2dqs {sae}, %ymm3, %xmm2 +# INTEL: vcvttpd2dqs xmm2, ymm3, {sae} +0x62,0xf5,0xf8,0x18,0x6d,0xd3 + +# ATT: vcvttpd2dqs %ymm3, %xmm2 {%k7} +# INTEL: vcvttpd2dqs xmm2 {k7}, ymm3 +0x62,0xf5,0xfc,0x2f,0x6d,0xd3 + +# ATT: vcvttpd2dqs {sae}, %ymm3, %xmm2 {%k7} {z} +# INTEL: vcvttpd2dqs xmm2 {k7} {z}, ymm3, {sae} +0x62,0xf5,0xf8,0x9f,0x6d,0xd3 + +# ATT: vcvttpd2dqs %zmm3, %ymm2 +# INTEL: vcvttpd2dqs ymm2, zmm3 +0x62,0xf5,0xfc,0x48,0x6d,0xd3 + +# ATT: vcvttpd2dqs {sae}, %zmm3, %ymm2 +# INTEL: vcvttpd2dqs ymm2, zmm3, {sae} +0x62,0xf5,0xfc,0x18,0x6d,0xd3 + +# ATT: vcvttpd2dqs %zmm3, %ymm2 {%k7} +# INTEL: vcvttpd2dqs ymm2 {k7}, zmm3 +0x62,0xf5,0xfc,0x4f,0x6d,0xd3 + +# ATT: vcvttpd2dqs {sae}, %zmm3, %ymm2 {%k7} {z} +# INTEL: vcvttpd2dqs ymm2 {k7} {z}, zmm3, {sae} +0x62,0xf5,0xfc,0x9f,0x6d,0xd3 + +# ATT: vcvttpd2dqsx 268435456(%esp,%esi,8), %xmm2 +# INTEL: vcvttpd2dqs xmm2, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0xfc,0x08,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttpd2dqsx 291(%edi,%eax,4), %xmm2 {%k7} +# INTEL: vcvttpd2dqs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291] +0x62,0xf5,0xfc,0x0f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttpd2dqs (%eax){1to2}, %xmm2 +# INTEL: vcvttpd2dqs xmm2, qword ptr [eax]{1to2} +0x62,0xf5,0xfc,0x18,0x6d,0x10 + +# ATT: vcvttpd2dqsx -512(,%ebp,2), %xmm2 +# INTEL: vcvttpd2dqs xmm2, xmmword ptr [2*ebp - 512] +0x62,0xf5,0xfc,0x08,0x6d,0x14,0x6d,0x00,0xfe,0xff,0xff + +# ATT: vcvttpd2dqsx 2032(%ecx), %xmm2 {%k7} {z} +# INTEL: vcvttpd2dqs xmm2 {k7} {z}, xmmword ptr [ecx + 2032] +0x62,0xf5,0xfc,0x8f,0x6d,0x51,0x7f + +# ATT: vcvttpd2dqs -1024(%edx){1to2}, %xmm2 {%k7} {z} +# INTEL: vcvttpd2dqs xmm2 {k7} {z}, qword ptr [edx - 1024]{1to2} +0x62,0xf5,0xfc,0x9f,0x6d,0x52,0x80 + +# ATT: vcvttpd2dqs (%eax){1to4}, %xmm2 +# INTEL: vcvttpd2dqs xmm2, qword ptr [eax]{1to4} +0x62,0xf5,0xfc,0x38,0x6d,0x10 + +# ATT: vcvttpd2dqsy -1024(,%ebp,2), %xmm2 +# INTEL: vcvttpd2dqs xmm2, ymmword ptr [2*ebp - 1024] +0x62,0xf5,0xfc,0x28,0x6d,0x14,0x6d,0x00,0xfc,0xff,0xff + +# ATT: vcvttpd2dqsy 4064(%ecx), %xmm2 {%k7} {z} +# INTEL: vcvttpd2dqs xmm2 {k7} {z}, ymmword ptr [ecx + 4064] +0x62,0xf5,0xfc,0xaf,0x6d,0x51,0x7f + +# ATT: vcvttpd2dqs -1024(%edx){1to4}, %xmm2 {%k7} {z} +# INTEL: vcvttpd2dqs xmm2 {k7} {z}, qword ptr [edx - 1024]{1to4} +0x62,0xf5,0xfc,0xbf,0x6d,0x52,0x80 + +# ATT: vcvttpd2dqs 268435456(%esp,%esi,8), %ymm2 +# INTEL: vcvttpd2dqs ymm2, zmmword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0xfc,0x48,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttpd2dqs 291(%edi,%eax,4), %ymm2 {%k7} +# INTEL: vcvttpd2dqs ymm2 {k7}, zmmword ptr [edi + 4*eax + 291] +0x62,0xf5,0xfc,0x4f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttpd2dqs (%eax){1to8}, %ymm2 +# INTEL: vcvttpd2dqs ymm2, qword ptr [eax]{1to8} +0x62,0xf5,0xfc,0x58,0x6d,0x10 + +# ATT: vcvttpd2dqs -2048(,%ebp,2), %ymm2 +# INTEL: vcvttpd2dqs ymm2, zmmword ptr [2*ebp - 2048] +0x62,0xf5,0xfc,0x48,0x6d,0x14,0x6d,0x00,0xf8,0xff,0xff + +# ATT: vcvttpd2dqs 8128(%ecx), %ymm2 {%k7} {z} +# INTEL: vcvttpd2dqs ymm2 {k7} {z}, zmmword ptr [ecx + 8128] +0x62,0xf5,0xfc,0xcf,0x6d,0x51,0x7f + +# ATT: vcvttpd2dqs -1024(%edx){1to8}, %ymm2 {%k7} {z} +# INTEL: vcvttpd2dqs ymm2 {k7} {z}, qword ptr [edx - 1024]{1to8} +0x62,0xf5,0xfc,0xdf,0x6d,0x52,0x80 + +# ATT: vcvttpd2qqs %xmm3, %xmm2 +# INTEL: vcvttpd2qqs xmm2, xmm3 +0x62,0xf5,0xfd,0x08,0x6d,0xd3 + +# ATT: vcvttpd2qqs %xmm3, %xmm2 {%k7} +# INTEL: 
vcvttpd2qqs xmm2 {k7}, xmm3 +0x62,0xf5,0xfd,0x0f,0x6d,0xd3 + +# ATT: vcvttpd2qqs %xmm3, %xmm2 {%k7} {z} +# INTEL: vcvttpd2qqs xmm2 {k7} {z}, xmm3 +0x62,0xf5,0xfd,0x8f,0x6d,0xd3 + +# ATT: vcvttpd2qqs %ymm3, %ymm2 +# INTEL: vcvttpd2qqs ymm2, ymm3 +0x62,0xf5,0xfd,0x28,0x6d,0xd3 + +# ATT: vcvttpd2qqs {sae}, %ymm3, %ymm2 +# INTEL: vcvttpd2qqs ymm2, ymm3, {sae} +0x62,0xf5,0xf9,0x18,0x6d,0xd3 + +# ATT: vcvttpd2qqs %ymm3, %ymm2 {%k7} +# INTEL: vcvttpd2qqs ymm2 {k7}, ymm3 +0x62,0xf5,0xfd,0x2f,0x6d,0xd3 + +# ATT: vcvttpd2qqs {sae}, %ymm3, %ymm2 {%k7} {z} +# INTEL: vcvttpd2qqs ymm2 {k7} {z}, ymm3, {sae} +0x62,0xf5,0xf9,0x9f,0x6d,0xd3 + +# ATT: vcvttpd2qqs %zmm3, %zmm2 +# INTEL: vcvttpd2qqs zmm2, zmm3 +0x62,0xf5,0xfd,0x48,0x6d,0xd3 + +# ATT: vcvttpd2qqs {sae}, %zmm3, %zmm2 +# INTEL: vcvttpd2qqs zmm2, zmm3, {sae} +0x62,0xf5,0xfd,0x18,0x6d,0xd3 + +# ATT: vcvttpd2qqs %zmm3, %zmm2 {%k7} +# INTEL: vcvttpd2qqs zmm2 {k7}, zmm3 +0x62,0xf5,0xfd,0x4f,0x6d,0xd3 + +# ATT: vcvttpd2qqs {sae}, %zmm3, %zmm2 {%k7} {z} +# INTEL: vcvttpd2qqs zmm2 {k7} {z}, zmm3, {sae} +0x62,0xf5,0xfd,0x9f,0x6d,0xd3 + +# ATT: vcvttpd2qqs 268435456(%esp,%esi,8), %xmm2 +# INTEL: vcvttpd2qqs xmm2, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0xfd,0x08,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttpd2qqs 291(%edi,%eax,4), %xmm2 {%k7} +# INTEL: vcvttpd2qqs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291] +0x62,0xf5,0xfd,0x0f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttpd2qqs (%eax){1to2}, %xmm2 +# INTEL: vcvttpd2qqs xmm2, qword ptr [eax]{1to2} +0x62,0xf5,0xfd,0x18,0x6d,0x10 + +# ATT: vcvttpd2qqs -512(,%ebp,2), %xmm2 +# INTEL: vcvttpd2qqs xmm2, xmmword ptr [2*ebp - 512] +0x62,0xf5,0xfd,0x08,0x6d,0x14,0x6d,0x00,0xfe,0xff,0xff + +# ATT: vcvttpd2qqs 2032(%ecx), %xmm2 {%k7} {z} +# INTEL: vcvttpd2qqs xmm2 {k7} {z}, xmmword ptr [ecx + 2032] +0x62,0xf5,0xfd,0x8f,0x6d,0x51,0x7f + +# ATT: vcvttpd2qqs -1024(%edx){1to2}, %xmm2 {%k7} {z} +# INTEL: vcvttpd2qqs xmm2 {k7} {z}, qword ptr [edx - 1024]{1to2} +0x62,0xf5,0xfd,0x9f,0x6d,0x52,0x80 + +# ATT: vcvttpd2qqs 268435456(%esp,%esi,8), %ymm2 +# INTEL: vcvttpd2qqs ymm2, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0xfd,0x28,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttpd2qqs 291(%edi,%eax,4), %ymm2 {%k7} +# INTEL: vcvttpd2qqs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291] +0x62,0xf5,0xfd,0x2f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttpd2qqs (%eax){1to4}, %ymm2 +# INTEL: vcvttpd2qqs ymm2, qword ptr [eax]{1to4} +0x62,0xf5,0xfd,0x38,0x6d,0x10 + +# ATT: vcvttpd2qqs -1024(,%ebp,2), %ymm2 +# INTEL: vcvttpd2qqs ymm2, ymmword ptr [2*ebp - 1024] +0x62,0xf5,0xfd,0x28,0x6d,0x14,0x6d,0x00,0xfc,0xff,0xff + +# ATT: vcvttpd2qqs 4064(%ecx), %ymm2 {%k7} {z} +# INTEL: vcvttpd2qqs ymm2 {k7} {z}, ymmword ptr [ecx + 4064] +0x62,0xf5,0xfd,0xaf,0x6d,0x51,0x7f + +# ATT: vcvttpd2qqs -1024(%edx){1to4}, %ymm2 {%k7} {z} +# INTEL: vcvttpd2qqs ymm2 {k7} {z}, qword ptr [edx - 1024]{1to4} +0x62,0xf5,0xfd,0xbf,0x6d,0x52,0x80 + +# ATT: vcvttpd2qqs 268435456(%esp,%esi,8), %zmm2 +# INTEL: vcvttpd2qqs zmm2, zmmword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0xfd,0x48,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttpd2qqs 291(%edi,%eax,4), %zmm2 {%k7} +# INTEL: vcvttpd2qqs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291] +0x62,0xf5,0xfd,0x4f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttpd2qqs (%eax){1to8}, %zmm2 +# INTEL: vcvttpd2qqs zmm2, qword ptr [eax]{1to8} +0x62,0xf5,0xfd,0x58,0x6d,0x10 + +# ATT: vcvttpd2qqs -2048(,%ebp,2), %zmm2 +# INTEL: vcvttpd2qqs zmm2, zmmword ptr [2*ebp - 2048] 
+0x62,0xf5,0xfd,0x48,0x6d,0x14,0x6d,0x00,0xf8,0xff,0xff + +# ATT: vcvttpd2qqs 8128(%ecx), %zmm2 {%k7} {z} +# INTEL: vcvttpd2qqs zmm2 {k7} {z}, zmmword ptr [ecx + 8128] +0x62,0xf5,0xfd,0xcf,0x6d,0x51,0x7f + +# ATT: vcvttpd2qqs -1024(%edx){1to8}, %zmm2 {%k7} {z} +# INTEL: vcvttpd2qqs zmm2 {k7} {z}, qword ptr [edx - 1024]{1to8} +0x62,0xf5,0xfd,0xdf,0x6d,0x52,0x80 + +# ATT: vcvttpd2udqs %xmm3, %xmm2 +# INTEL: vcvttpd2udqs xmm2, xmm3 +0x62,0xf5,0xfc,0x08,0x6c,0xd3 + +# ATT: vcvttpd2udqs %xmm3, %xmm2 {%k7} +# INTEL: vcvttpd2udqs xmm2 {k7}, xmm3 +0x62,0xf5,0xfc,0x0f,0x6c,0xd3 + +# ATT: vcvttpd2udqs %xmm3, %xmm2 {%k7} {z} +# INTEL: vcvttpd2udqs xmm2 {k7} {z}, xmm3 +0x62,0xf5,0xfc,0x8f,0x6c,0xd3 + +# ATT: vcvttpd2udqs %ymm3, %xmm2 +# INTEL: vcvttpd2udqs xmm2, ymm3 +0x62,0xf5,0xfc,0x28,0x6c,0xd3 + +# ATT: vcvttpd2udqs {sae}, %ymm3, %xmm2 +# INTEL: vcvttpd2udqs xmm2, ymm3, {sae} +0x62,0xf5,0xf8,0x18,0x6c,0xd3 + +# ATT: vcvttpd2udqs %ymm3, %xmm2 {%k7} +# INTEL: vcvttpd2udqs xmm2 {k7}, ymm3 +0x62,0xf5,0xfc,0x2f,0x6c,0xd3 + +# ATT: vcvttpd2udqs {sae}, %ymm3, %xmm2 {%k7} {z} +# INTEL: vcvttpd2udqs xmm2 {k7} {z}, ymm3, {sae} +0x62,0xf5,0xf8,0x9f,0x6c,0xd3 + +# ATT: vcvttpd2udqs %zmm3, %ymm2 +# INTEL: vcvttpd2udqs ymm2, zmm3 +0x62,0xf5,0xfc,0x48,0x6c,0xd3 + +# ATT: vcvttpd2udqs {sae}, %zmm3, %ymm2 +# INTEL: vcvttpd2udqs ymm2, zmm3, {sae} +0x62,0xf5,0xfc,0x18,0x6c,0xd3 + +# ATT: vcvttpd2udqs %zmm3, %ymm2 {%k7} +# INTEL: vcvttpd2udqs ymm2 {k7}, zmm3 +0x62,0xf5,0xfc,0x4f,0x6c,0xd3 + +# ATT: vcvttpd2udqs {sae}, %zmm3, %ymm2 {%k7} {z} +# INTEL: vcvttpd2udqs ymm2 {k7} {z}, zmm3, {sae} +0x62,0xf5,0xfc,0x9f,0x6c,0xd3 + +# ATT: vcvttpd2udqsx 268435456(%esp,%esi,8), %xmm2 +# INTEL: vcvttpd2udqs xmm2, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0xfc,0x08,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttpd2udqsx 291(%edi,%eax,4), %xmm2 {%k7} +# INTEL: vcvttpd2udqs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291] +0x62,0xf5,0xfc,0x0f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttpd2udqs (%eax){1to2}, %xmm2 +# INTEL: vcvttpd2udqs xmm2, qword ptr [eax]{1to2} +0x62,0xf5,0xfc,0x18,0x6c,0x10 + +# ATT: vcvttpd2udqsx -512(,%ebp,2), %xmm2 +# INTEL: vcvttpd2udqs xmm2, xmmword ptr [2*ebp - 512] +0x62,0xf5,0xfc,0x08,0x6c,0x14,0x6d,0x00,0xfe,0xff,0xff + +# ATT: vcvttpd2udqsx 2032(%ecx), %xmm2 {%k7} {z} +# INTEL: vcvttpd2udqs xmm2 {k7} {z}, xmmword ptr [ecx + 2032] +0x62,0xf5,0xfc,0x8f,0x6c,0x51,0x7f + +# ATT: vcvttpd2udqs -1024(%edx){1to2}, %xmm2 {%k7} {z} +# INTEL: vcvttpd2udqs xmm2 {k7} {z}, qword ptr [edx - 1024]{1to2} +0x62,0xf5,0xfc,0x9f,0x6c,0x52,0x80 + +# ATT: vcvttpd2udqs (%eax){1to4}, %xmm2 +# INTEL: vcvttpd2udqs xmm2, qword ptr [eax]{1to4} +0x62,0xf5,0xfc,0x38,0x6c,0x10 + +# ATT: vcvttpd2udqsy -1024(,%ebp,2), %xmm2 +# INTEL: vcvttpd2udqs xmm2, ymmword ptr [2*ebp - 1024] +0x62,0xf5,0xfc,0x28,0x6c,0x14,0x6d,0x00,0xfc,0xff,0xff + +# ATT: vcvttpd2udqsy 4064(%ecx), %xmm2 {%k7} {z} +# INTEL: vcvttpd2udqs xmm2 {k7} {z}, ymmword ptr [ecx + 4064] +0x62,0xf5,0xfc,0xaf,0x6c,0x51,0x7f + +# ATT: vcvttpd2udqs -1024(%edx){1to4}, %xmm2 {%k7} {z} +# INTEL: vcvttpd2udqs xmm2 {k7} {z}, qword ptr [edx - 1024]{1to4} +0x62,0xf5,0xfc,0xbf,0x6c,0x52,0x80 + +# ATT: vcvttpd2udqs 268435456(%esp,%esi,8), %ymm2 +# INTEL: vcvttpd2udqs ymm2, zmmword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0xfc,0x48,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttpd2udqs 291(%edi,%eax,4), %ymm2 {%k7} +# INTEL: vcvttpd2udqs ymm2 {k7}, zmmword ptr [edi + 4*eax + 291] +0x62,0xf5,0xfc,0x4f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttpd2udqs (%eax){1to8}, %ymm2 
+# INTEL: vcvttpd2udqs ymm2, qword ptr [eax]{1to8} +0x62,0xf5,0xfc,0x58,0x6c,0x10 + +# ATT: vcvttpd2udqs -2048(,%ebp,2), %ymm2 +# INTEL: vcvttpd2udqs ymm2, zmmword ptr [2*ebp - 2048] +0x62,0xf5,0xfc,0x48,0x6c,0x14,0x6d,0x00,0xf8,0xff,0xff + +# ATT: vcvttpd2udqs 8128(%ecx), %ymm2 {%k7} {z} +# INTEL: vcvttpd2udqs ymm2 {k7} {z}, zmmword ptr [ecx + 8128] +0x62,0xf5,0xfc,0xcf,0x6c,0x51,0x7f + +# ATT: vcvttpd2udqs -1024(%edx){1to8}, %ymm2 {%k7} {z} +# INTEL: vcvttpd2udqs ymm2 {k7} {z}, qword ptr [edx - 1024]{1to8} +0x62,0xf5,0xfc,0xdf,0x6c,0x52,0x80 + +# ATT: vcvttpd2uqqs %xmm3, %xmm2 +# INTEL: vcvttpd2uqqs xmm2, xmm3 +0x62,0xf5,0xfd,0x08,0x6c,0xd3 + +# ATT: vcvttpd2uqqs %xmm3, %xmm2 {%k7} +# INTEL: vcvttpd2uqqs xmm2 {k7}, xmm3 +0x62,0xf5,0xfd,0x0f,0x6c,0xd3 + +# ATT: vcvttpd2uqqs %xmm3, %xmm2 {%k7} {z} +# INTEL: vcvttpd2uqqs xmm2 {k7} {z}, xmm3 +0x62,0xf5,0xfd,0x8f,0x6c,0xd3 + +# ATT: vcvttpd2uqqs %ymm3, %ymm2 +# INTEL: vcvttpd2uqqs ymm2, ymm3 +0x62,0xf5,0xfd,0x28,0x6c,0xd3 + +# ATT: vcvttpd2uqqs {sae}, %ymm3, %ymm2 +# INTEL: vcvttpd2uqqs ymm2, ymm3, {sae} +0x62,0xf5,0xf9,0x18,0x6c,0xd3 + +# ATT: vcvttpd2uqqs %ymm3, %ymm2 {%k7} +# INTEL: vcvttpd2uqqs ymm2 {k7}, ymm3 +0x62,0xf5,0xfd,0x2f,0x6c,0xd3 + +# ATT: vcvttpd2uqqs {sae}, %ymm3, %ymm2 {%k7} {z} +# INTEL: vcvttpd2uqqs ymm2 {k7} {z}, ymm3, {sae} +0x62,0xf5,0xf9,0x9f,0x6c,0xd3 + +# ATT: vcvttpd2uqqs %zmm3, %zmm2 +# INTEL: vcvttpd2uqqs zmm2, zmm3 +0x62,0xf5,0xfd,0x48,0x6c,0xd3 + +# ATT: vcvttpd2uqqs {sae}, %zmm3, %zmm2 +# INTEL: vcvttpd2uqqs zmm2, zmm3, {sae} +0x62,0xf5,0xfd,0x18,0x6c,0xd3 + +# ATT: vcvttpd2uqqs %zmm3, %zmm2 {%k7} +# INTEL: vcvttpd2uqqs zmm2 {k7}, zmm3 +0x62,0xf5,0xfd,0x4f,0x6c,0xd3 + +# ATT: vcvttpd2uqqs {sae}, %zmm3, %zmm2 {%k7} {z} +# INTEL: vcvttpd2uqqs zmm2 {k7} {z}, zmm3, {sae} +0x62,0xf5,0xfd,0x9f,0x6c,0xd3 + +# ATT: vcvttpd2uqqs 268435456(%esp,%esi,8), %xmm2 +# INTEL: vcvttpd2uqqs xmm2, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0xfd,0x08,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttpd2uqqs 291(%edi,%eax,4), %xmm2 {%k7} +# INTEL: vcvttpd2uqqs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291] +0x62,0xf5,0xfd,0x0f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttpd2uqqs (%eax){1to2}, %xmm2 +# INTEL: vcvttpd2uqqs xmm2, qword ptr [eax]{1to2} +0x62,0xf5,0xfd,0x18,0x6c,0x10 + +# ATT: vcvttpd2uqqs -512(,%ebp,2), %xmm2 +# INTEL: vcvttpd2uqqs xmm2, xmmword ptr [2*ebp - 512] +0x62,0xf5,0xfd,0x08,0x6c,0x14,0x6d,0x00,0xfe,0xff,0xff + +# ATT: vcvttpd2uqqs 2032(%ecx), %xmm2 {%k7} {z} +# INTEL: vcvttpd2uqqs xmm2 {k7} {z}, xmmword ptr [ecx + 2032] +0x62,0xf5,0xfd,0x8f,0x6c,0x51,0x7f + +# ATT: vcvttpd2uqqs -1024(%edx){1to2}, %xmm2 {%k7} {z} +# INTEL: vcvttpd2uqqs xmm2 {k7} {z}, qword ptr [edx - 1024]{1to2} +0x62,0xf5,0xfd,0x9f,0x6c,0x52,0x80 + +# ATT: vcvttpd2uqqs 268435456(%esp,%esi,8), %ymm2 +# INTEL: vcvttpd2uqqs ymm2, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0xfd,0x28,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttpd2uqqs 291(%edi,%eax,4), %ymm2 {%k7} +# INTEL: vcvttpd2uqqs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291] +0x62,0xf5,0xfd,0x2f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttpd2uqqs (%eax){1to4}, %ymm2 +# INTEL: vcvttpd2uqqs ymm2, qword ptr [eax]{1to4} +0x62,0xf5,0xfd,0x38,0x6c,0x10 + +# ATT: vcvttpd2uqqs -1024(,%ebp,2), %ymm2 +# INTEL: vcvttpd2uqqs ymm2, ymmword ptr [2*ebp - 1024] +0x62,0xf5,0xfd,0x28,0x6c,0x14,0x6d,0x00,0xfc,0xff,0xff + +# ATT: vcvttpd2uqqs 4064(%ecx), %ymm2 {%k7} {z} +# INTEL: vcvttpd2uqqs ymm2 {k7} {z}, ymmword ptr [ecx + 4064] +0x62,0xf5,0xfd,0xaf,0x6c,0x51,0x7f + +# ATT: vcvttpd2uqqs 
-1024(%edx){1to4}, %ymm2 {%k7} {z} +# INTEL: vcvttpd2uqqs ymm2 {k7} {z}, qword ptr [edx - 1024]{1to4} +0x62,0xf5,0xfd,0xbf,0x6c,0x52,0x80 + +# ATT: vcvttpd2uqqs 268435456(%esp,%esi,8), %zmm2 +# INTEL: vcvttpd2uqqs zmm2, zmmword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0xfd,0x48,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttpd2uqqs 291(%edi,%eax,4), %zmm2 {%k7} +# INTEL: vcvttpd2uqqs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291] +0x62,0xf5,0xfd,0x4f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttpd2uqqs (%eax){1to8}, %zmm2 +# INTEL: vcvttpd2uqqs zmm2, qword ptr [eax]{1to8} +0x62,0xf5,0xfd,0x58,0x6c,0x10 + +# ATT: vcvttpd2uqqs -2048(,%ebp,2), %zmm2 +# INTEL: vcvttpd2uqqs zmm2, zmmword ptr [2*ebp - 2048] +0x62,0xf5,0xfd,0x48,0x6c,0x14,0x6d,0x00,0xf8,0xff,0xff + +# ATT: vcvttpd2uqqs 8128(%ecx), %zmm2 {%k7} {z} +# INTEL: vcvttpd2uqqs zmm2 {k7} {z}, zmmword ptr [ecx + 8128] +0x62,0xf5,0xfd,0xcf,0x6c,0x51,0x7f + +# ATT: vcvttpd2uqqs -1024(%edx){1to8}, %zmm2 {%k7} {z} +# INTEL: vcvttpd2uqqs zmm2 {k7} {z}, qword ptr [edx - 1024]{1to8} +0x62,0xf5,0xfd,0xdf,0x6c,0x52,0x80 + +# ATT: vcvttps2dqs %xmm3, %xmm2 +# INTEL: vcvttps2dqs xmm2, xmm3 +0x62,0xf5,0x7c,0x08,0x6d,0xd3 + +# ATT: vcvttps2dqs %xmm3, %xmm2 {%k7} +# INTEL: vcvttps2dqs xmm2 {k7}, xmm3 +0x62,0xf5,0x7c,0x0f,0x6d,0xd3 + +# ATT: vcvttps2dqs %xmm3, %xmm2 {%k7} {z} +# INTEL: vcvttps2dqs xmm2 {k7} {z}, xmm3 +0x62,0xf5,0x7c,0x8f,0x6d,0xd3 + +# ATT: vcvttps2dqs %ymm3, %ymm2 +# INTEL: vcvttps2dqs ymm2, ymm3 +0x62,0xf5,0x7c,0x28,0x6d,0xd3 + +# ATT: vcvttps2dqs {sae}, %ymm3, %ymm2 +# INTEL: vcvttps2dqs ymm2, ymm3, {sae} +0x62,0xf5,0x78,0x18,0x6d,0xd3 + +# ATT: vcvttps2dqs %ymm3, %ymm2 {%k7} +# INTEL: vcvttps2dqs ymm2 {k7}, ymm3 +0x62,0xf5,0x7c,0x2f,0x6d,0xd3 + +# ATT: vcvttps2dqs {sae}, %ymm3, %ymm2 {%k7} {z} +# INTEL: vcvttps2dqs ymm2 {k7} {z}, ymm3, {sae} +0x62,0xf5,0x78,0x9f,0x6d,0xd3 + +# ATT: vcvttps2dqs %zmm3, %zmm2 +# INTEL: vcvttps2dqs zmm2, zmm3 +0x62,0xf5,0x7c,0x48,0x6d,0xd3 + +# ATT: vcvttps2dqs {sae}, %zmm3, %zmm2 +# INTEL: vcvttps2dqs zmm2, zmm3, {sae} +0x62,0xf5,0x7c,0x18,0x6d,0xd3 + +# ATT: vcvttps2dqs %zmm3, %zmm2 {%k7} +# INTEL: vcvttps2dqs zmm2 {k7}, zmm3 +0x62,0xf5,0x7c,0x4f,0x6d,0xd3 + +# ATT: vcvttps2dqs {sae}, %zmm3, %zmm2 {%k7} {z} +# INTEL: vcvttps2dqs zmm2 {k7} {z}, zmm3, {sae} +0x62,0xf5,0x7c,0x9f,0x6d,0xd3 + +# ATT: vcvttps2dqs 268435456(%esp,%esi,8), %xmm2 +# INTEL: vcvttps2dqs xmm2, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0x7c,0x08,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2dqs 291(%edi,%eax,4), %xmm2 {%k7} +# INTEL: vcvttps2dqs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291] +0x62,0xf5,0x7c,0x0f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2dqs (%eax){1to4}, %xmm2 +# INTEL: vcvttps2dqs xmm2, dword ptr [eax]{1to4} +0x62,0xf5,0x7c,0x18,0x6d,0x10 + +# ATT: vcvttps2dqs -512(,%ebp,2), %xmm2 +# INTEL: vcvttps2dqs xmm2, xmmword ptr [2*ebp - 512] +0x62,0xf5,0x7c,0x08,0x6d,0x14,0x6d,0x00,0xfe,0xff,0xff + +# ATT: vcvttps2dqs 2032(%ecx), %xmm2 {%k7} {z} +# INTEL: vcvttps2dqs xmm2 {k7} {z}, xmmword ptr [ecx + 2032] +0x62,0xf5,0x7c,0x8f,0x6d,0x51,0x7f + +# ATT: vcvttps2dqs -512(%edx){1to4}, %xmm2 {%k7} {z} +# INTEL: vcvttps2dqs xmm2 {k7} {z}, dword ptr [edx - 512]{1to4} +0x62,0xf5,0x7c,0x9f,0x6d,0x52,0x80 + +# ATT: vcvttps2dqs 268435456(%esp,%esi,8), %ymm2 +# INTEL: vcvttps2dqs ymm2, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0x7c,0x28,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2dqs 291(%edi,%eax,4), %ymm2 {%k7} +# INTEL: vcvttps2dqs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291] 
+0x62,0xf5,0x7c,0x2f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2dqs (%eax){1to8}, %ymm2 +# INTEL: vcvttps2dqs ymm2, dword ptr [eax]{1to8} +0x62,0xf5,0x7c,0x38,0x6d,0x10 + +# ATT: vcvttps2dqs -1024(,%ebp,2), %ymm2 +# INTEL: vcvttps2dqs ymm2, ymmword ptr [2*ebp - 1024] +0x62,0xf5,0x7c,0x28,0x6d,0x14,0x6d,0x00,0xfc,0xff,0xff + +# ATT: vcvttps2dqs 4064(%ecx), %ymm2 {%k7} {z} +# INTEL: vcvttps2dqs ymm2 {k7} {z}, ymmword ptr [ecx + 4064] +0x62,0xf5,0x7c,0xaf,0x6d,0x51,0x7f + +# ATT: vcvttps2dqs -512(%edx){1to8}, %ymm2 {%k7} {z} +# INTEL: vcvttps2dqs ymm2 {k7} {z}, dword ptr [edx - 512]{1to8} +0x62,0xf5,0x7c,0xbf,0x6d,0x52,0x80 + +# ATT: vcvttps2dqs 268435456(%esp,%esi,8), %zmm2 +# INTEL: vcvttps2dqs zmm2, zmmword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0x7c,0x48,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2dqs 291(%edi,%eax,4), %zmm2 {%k7} +# INTEL: vcvttps2dqs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291] +0x62,0xf5,0x7c,0x4f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2dqs (%eax){1to16}, %zmm2 +# INTEL: vcvttps2dqs zmm2, dword ptr [eax]{1to16} +0x62,0xf5,0x7c,0x58,0x6d,0x10 + +# ATT: vcvttps2dqs -2048(,%ebp,2), %zmm2 +# INTEL: vcvttps2dqs zmm2, zmmword ptr [2*ebp - 2048] +0x62,0xf5,0x7c,0x48,0x6d,0x14,0x6d,0x00,0xf8,0xff,0xff + +# ATT: vcvttps2dqs 8128(%ecx), %zmm2 {%k7} {z} +# INTEL: vcvttps2dqs zmm2 {k7} {z}, zmmword ptr [ecx + 8128] +0x62,0xf5,0x7c,0xcf,0x6d,0x51,0x7f + +# ATT: vcvttps2dqs -512(%edx){1to16}, %zmm2 {%k7} {z} +# INTEL: vcvttps2dqs zmm2 {k7} {z}, dword ptr [edx - 512]{1to16} +0x62,0xf5,0x7c,0xdf,0x6d,0x52,0x80 + +# ATT: vcvttps2qqs %xmm3, %xmm2 +# INTEL: vcvttps2qqs xmm2, xmm3 +0x62,0xf5,0x7d,0x08,0x6d,0xd3 + +# ATT: vcvttps2qqs %xmm3, %xmm2 {%k7} +# INTEL: vcvttps2qqs xmm2 {k7}, xmm3 +0x62,0xf5,0x7d,0x0f,0x6d,0xd3 + +# ATT: vcvttps2qqs %xmm3, %xmm2 {%k7} {z} +# INTEL: vcvttps2qqs xmm2 {k7} {z}, xmm3 +0x62,0xf5,0x7d,0x8f,0x6d,0xd3 + +# ATT: vcvttps2qqs %xmm3, %ymm2 +# INTEL: vcvttps2qqs ymm2, xmm3 +0x62,0xf5,0x7d,0x28,0x6d,0xd3 + +# ATT: vcvttps2qqs {sae}, %xmm3, %ymm2 +# INTEL: vcvttps2qqs ymm2, xmm3, {sae} +0x62,0xf5,0x79,0x18,0x6d,0xd3 + +# ATT: vcvttps2qqs %xmm3, %ymm2 {%k7} +# INTEL: vcvttps2qqs ymm2 {k7}, xmm3 +0x62,0xf5,0x7d,0x2f,0x6d,0xd3 + +# ATT: vcvttps2qqs {sae}, %xmm3, %ymm2 {%k7} {z} +# INTEL: vcvttps2qqs ymm2 {k7} {z}, xmm3, {sae} +0x62,0xf5,0x79,0x9f,0x6d,0xd3 + +# ATT: vcvttps2qqs %ymm3, %zmm2 +# INTEL: vcvttps2qqs zmm2, ymm3 +0x62,0xf5,0x7d,0x48,0x6d,0xd3 + +# ATT: vcvttps2qqs {sae}, %ymm3, %zmm2 +# INTEL: vcvttps2qqs zmm2, ymm3, {sae} +0x62,0xf5,0x7d,0x18,0x6d,0xd3 + +# ATT: vcvttps2qqs %ymm3, %zmm2 {%k7} +# INTEL: vcvttps2qqs zmm2 {k7}, ymm3 +0x62,0xf5,0x7d,0x4f,0x6d,0xd3 + +# ATT: vcvttps2qqs {sae}, %ymm3, %zmm2 {%k7} {z} +# INTEL: vcvttps2qqs zmm2 {k7} {z}, ymm3, {sae} +0x62,0xf5,0x7d,0x9f,0x6d,0xd3 + +# ATT: vcvttps2qqs 268435456(%esp,%esi,8), %xmm2 +# INTEL: vcvttps2qqs xmm2, qword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0x7d,0x08,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2qqs 291(%edi,%eax,4), %xmm2 {%k7} +# INTEL: vcvttps2qqs xmm2 {k7}, qword ptr [edi + 4*eax + 291] +0x62,0xf5,0x7d,0x0f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2qqs (%eax){1to2}, %xmm2 +# INTEL: vcvttps2qqs xmm2, dword ptr [eax]{1to2} +0x62,0xf5,0x7d,0x18,0x6d,0x10 + +# ATT: vcvttps2qqs -256(,%ebp,2), %xmm2 +# INTEL: vcvttps2qqs xmm2, qword ptr [2*ebp - 256] +0x62,0xf5,0x7d,0x08,0x6d,0x14,0x6d,0x00,0xff,0xff,0xff + +# ATT: vcvttps2qqs 1016(%ecx), %xmm2 {%k7} {z} +# INTEL: vcvttps2qqs xmm2 {k7} {z}, qword ptr [ecx + 1016] 
+0x62,0xf5,0x7d,0x8f,0x6d,0x51,0x7f + +# ATT: vcvttps2qqs -512(%edx){1to2}, %xmm2 {%k7} {z} +# INTEL: vcvttps2qqs xmm2 {k7} {z}, dword ptr [edx - 512]{1to2} +0x62,0xf5,0x7d,0x9f,0x6d,0x52,0x80 + +# ATT: vcvttps2qqs 268435456(%esp,%esi,8), %ymm2 +# INTEL: vcvttps2qqs ymm2, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0x7d,0x28,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2qqs 291(%edi,%eax,4), %ymm2 {%k7} +# INTEL: vcvttps2qqs ymm2 {k7}, xmmword ptr [edi + 4*eax + 291] +0x62,0xf5,0x7d,0x2f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2qqs (%eax){1to4}, %ymm2 +# INTEL: vcvttps2qqs ymm2, dword ptr [eax]{1to4} +0x62,0xf5,0x7d,0x38,0x6d,0x10 + +# ATT: vcvttps2qqs -512(,%ebp,2), %ymm2 +# INTEL: vcvttps2qqs ymm2, xmmword ptr [2*ebp - 512] +0x62,0xf5,0x7d,0x28,0x6d,0x14,0x6d,0x00,0xfe,0xff,0xff + +# ATT: vcvttps2qqs 2032(%ecx), %ymm2 {%k7} {z} +# INTEL: vcvttps2qqs ymm2 {k7} {z}, xmmword ptr [ecx + 2032] +0x62,0xf5,0x7d,0xaf,0x6d,0x51,0x7f + +# ATT: vcvttps2qqs -512(%edx){1to4}, %ymm2 {%k7} {z} +# INTEL: vcvttps2qqs ymm2 {k7} {z}, dword ptr [edx - 512]{1to4} +0x62,0xf5,0x7d,0xbf,0x6d,0x52,0x80 + +# ATT: vcvttps2qqs 268435456(%esp,%esi,8), %zmm2 +# INTEL: vcvttps2qqs zmm2, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0x7d,0x48,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2qqs 291(%edi,%eax,4), %zmm2 {%k7} +# INTEL: vcvttps2qqs zmm2 {k7}, ymmword ptr [edi + 4*eax + 291] +0x62,0xf5,0x7d,0x4f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2qqs (%eax){1to8}, %zmm2 +# INTEL: vcvttps2qqs zmm2, dword ptr [eax]{1to8} +0x62,0xf5,0x7d,0x58,0x6d,0x10 + +# ATT: vcvttps2qqs -1024(,%ebp,2), %zmm2 +# INTEL: vcvttps2qqs zmm2, ymmword ptr [2*ebp - 1024] +0x62,0xf5,0x7d,0x48,0x6d,0x14,0x6d,0x00,0xfc,0xff,0xff + +# ATT: vcvttps2qqs 4064(%ecx), %zmm2 {%k7} {z} +# INTEL: vcvttps2qqs zmm2 {k7} {z}, ymmword ptr [ecx + 4064] +0x62,0xf5,0x7d,0xcf,0x6d,0x51,0x7f + +# ATT: vcvttps2qqs -512(%edx){1to8}, %zmm2 {%k7} {z} +# INTEL: vcvttps2qqs zmm2 {k7} {z}, dword ptr [edx - 512]{1to8} +0x62,0xf5,0x7d,0xdf,0x6d,0x52,0x80 + +# ATT: vcvttps2udqs %xmm3, %xmm2 +# INTEL: vcvttps2udqs xmm2, xmm3 +0x62,0xf5,0x7c,0x08,0x6c,0xd3 + +# ATT: vcvttps2udqs %xmm3, %xmm2 {%k7} +# INTEL: vcvttps2udqs xmm2 {k7}, xmm3 +0x62,0xf5,0x7c,0x0f,0x6c,0xd3 + +# ATT: vcvttps2udqs %xmm3, %xmm2 {%k7} {z} +# INTEL: vcvttps2udqs xmm2 {k7} {z}, xmm3 +0x62,0xf5,0x7c,0x8f,0x6c,0xd3 + +# ATT: vcvttps2udqs %ymm3, %ymm2 +# INTEL: vcvttps2udqs ymm2, ymm3 +0x62,0xf5,0x7c,0x28,0x6c,0xd3 + +# ATT: vcvttps2udqs {sae}, %ymm3, %ymm2 +# INTEL: vcvttps2udqs ymm2, ymm3, {sae} +0x62,0xf5,0x78,0x18,0x6c,0xd3 + +# ATT: vcvttps2udqs %ymm3, %ymm2 {%k7} +# INTEL: vcvttps2udqs ymm2 {k7}, ymm3 +0x62,0xf5,0x7c,0x2f,0x6c,0xd3 + +# ATT: vcvttps2udqs {sae}, %ymm3, %ymm2 {%k7} {z} +# INTEL: vcvttps2udqs ymm2 {k7} {z}, ymm3, {sae} +0x62,0xf5,0x78,0x9f,0x6c,0xd3 + +# ATT: vcvttps2udqs %zmm3, %zmm2 +# INTEL: vcvttps2udqs zmm2, zmm3 +0x62,0xf5,0x7c,0x48,0x6c,0xd3 + +# ATT: vcvttps2udqs {sae}, %zmm3, %zmm2 +# INTEL: vcvttps2udqs zmm2, zmm3, {sae} +0x62,0xf5,0x7c,0x18,0x6c,0xd3 + +# ATT: vcvttps2udqs %zmm3, %zmm2 {%k7} +# INTEL: vcvttps2udqs zmm2 {k7}, zmm3 +0x62,0xf5,0x7c,0x4f,0x6c,0xd3 + +# ATT: vcvttps2udqs {sae}, %zmm3, %zmm2 {%k7} {z} +# INTEL: vcvttps2udqs zmm2 {k7} {z}, zmm3, {sae} +0x62,0xf5,0x7c,0x9f,0x6c,0xd3 + +# ATT: vcvttps2udqs 268435456(%esp,%esi,8), %xmm2 +# INTEL: vcvttps2udqs xmm2, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0x7c,0x08,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2udqs 291(%edi,%eax,4), %xmm2 {%k7} +# INTEL: 
vcvttps2udqs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291] +0x62,0xf5,0x7c,0x0f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2udqs (%eax){1to4}, %xmm2 +# INTEL: vcvttps2udqs xmm2, dword ptr [eax]{1to4} +0x62,0xf5,0x7c,0x18,0x6c,0x10 + +# ATT: vcvttps2udqs -512(,%ebp,2), %xmm2 +# INTEL: vcvttps2udqs xmm2, xmmword ptr [2*ebp - 512] +0x62,0xf5,0x7c,0x08,0x6c,0x14,0x6d,0x00,0xfe,0xff,0xff + +# ATT: vcvttps2udqs 2032(%ecx), %xmm2 {%k7} {z} +# INTEL: vcvttps2udqs xmm2 {k7} {z}, xmmword ptr [ecx + 2032] +0x62,0xf5,0x7c,0x8f,0x6c,0x51,0x7f + +# ATT: vcvttps2udqs -512(%edx){1to4}, %xmm2 {%k7} {z} +# INTEL: vcvttps2udqs xmm2 {k7} {z}, dword ptr [edx - 512]{1to4} +0x62,0xf5,0x7c,0x9f,0x6c,0x52,0x80 + +# ATT: vcvttps2udqs 268435456(%esp,%esi,8), %ymm2 +# INTEL: vcvttps2udqs ymm2, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0x7c,0x28,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2udqs 291(%edi,%eax,4), %ymm2 {%k7} +# INTEL: vcvttps2udqs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291] +0x62,0xf5,0x7c,0x2f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2udqs (%eax){1to8}, %ymm2 +# INTEL: vcvttps2udqs ymm2, dword ptr [eax]{1to8} +0x62,0xf5,0x7c,0x38,0x6c,0x10 + +# ATT: vcvttps2udqs -1024(,%ebp,2), %ymm2 +# INTEL: vcvttps2udqs ymm2, ymmword ptr [2*ebp - 1024] +0x62,0xf5,0x7c,0x28,0x6c,0x14,0x6d,0x00,0xfc,0xff,0xff + +# ATT: vcvttps2udqs 4064(%ecx), %ymm2 {%k7} {z} +# INTEL: vcvttps2udqs ymm2 {k7} {z}, ymmword ptr [ecx + 4064] +0x62,0xf5,0x7c,0xaf,0x6c,0x51,0x7f + +# ATT: vcvttps2udqs -512(%edx){1to8}, %ymm2 {%k7} {z} +# INTEL: vcvttps2udqs ymm2 {k7} {z}, dword ptr [edx - 512]{1to8} +0x62,0xf5,0x7c,0xbf,0x6c,0x52,0x80 + +# ATT: vcvttps2udqs 268435456(%esp,%esi,8), %zmm2 +# INTEL: vcvttps2udqs zmm2, zmmword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0x7c,0x48,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2udqs 291(%edi,%eax,4), %zmm2 {%k7} +# INTEL: vcvttps2udqs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291] +0x62,0xf5,0x7c,0x4f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2udqs (%eax){1to16}, %zmm2 +# INTEL: vcvttps2udqs zmm2, dword ptr [eax]{1to16} +0x62,0xf5,0x7c,0x58,0x6c,0x10 + +# ATT: vcvttps2udqs -2048(,%ebp,2), %zmm2 +# INTEL: vcvttps2udqs zmm2, zmmword ptr [2*ebp - 2048] +0x62,0xf5,0x7c,0x48,0x6c,0x14,0x6d,0x00,0xf8,0xff,0xff + +# ATT: vcvttps2udqs 8128(%ecx), %zmm2 {%k7} {z} +# INTEL: vcvttps2udqs zmm2 {k7} {z}, zmmword ptr [ecx + 8128] +0x62,0xf5,0x7c,0xcf,0x6c,0x51,0x7f + +# ATT: vcvttps2udqs -512(%edx){1to16}, %zmm2 {%k7} {z} +# INTEL: vcvttps2udqs zmm2 {k7} {z}, dword ptr [edx - 512]{1to16} +0x62,0xf5,0x7c,0xdf,0x6c,0x52,0x80 + +# ATT: vcvttps2uqqs %xmm3, %xmm2 +# INTEL: vcvttps2uqqs xmm2, xmm3 +0x62,0xf5,0x7d,0x08,0x6c,0xd3 + +# ATT: vcvttps2uqqs %xmm3, %xmm2 {%k7} +# INTEL: vcvttps2uqqs xmm2 {k7}, xmm3 +0x62,0xf5,0x7d,0x0f,0x6c,0xd3 + +# ATT: vcvttps2uqqs %xmm3, %xmm2 {%k7} {z} +# INTEL: vcvttps2uqqs xmm2 {k7} {z}, xmm3 +0x62,0xf5,0x7d,0x8f,0x6c,0xd3 + +# ATT: vcvttps2uqqs %xmm3, %ymm2 +# INTEL: vcvttps2uqqs ymm2, xmm3 +0x62,0xf5,0x7d,0x28,0x6c,0xd3 + +# ATT: vcvttps2uqqs {sae}, %xmm3, %ymm2 +# INTEL: vcvttps2uqqs ymm2, xmm3, {sae} +0x62,0xf5,0x79,0x18,0x6c,0xd3 + +# ATT: vcvttps2uqqs %xmm3, %ymm2 {%k7} +# INTEL: vcvttps2uqqs ymm2 {k7}, xmm3 +0x62,0xf5,0x7d,0x2f,0x6c,0xd3 + +# ATT: vcvttps2uqqs {sae}, %xmm3, %ymm2 {%k7} {z} +# INTEL: vcvttps2uqqs ymm2 {k7} {z}, xmm3, {sae} +0x62,0xf5,0x79,0x9f,0x6c,0xd3 + +# ATT: vcvttps2uqqs %ymm3, %zmm2 +# INTEL: vcvttps2uqqs zmm2, ymm3 +0x62,0xf5,0x7d,0x48,0x6c,0xd3 + +# ATT: vcvttps2uqqs {sae}, %ymm3, %zmm2 +# INTEL: vcvttps2uqqs 
zmm2, ymm3, {sae} +0x62,0xf5,0x7d,0x18,0x6c,0xd3 + +# ATT: vcvttps2uqqs %ymm3, %zmm2 {%k7} +# INTEL: vcvttps2uqqs zmm2 {k7}, ymm3 +0x62,0xf5,0x7d,0x4f,0x6c,0xd3 + +# ATT: vcvttps2uqqs {sae}, %ymm3, %zmm2 {%k7} {z} +# INTEL: vcvttps2uqqs zmm2 {k7} {z}, ymm3, {sae} +0x62,0xf5,0x7d,0x9f,0x6c,0xd3 + +# ATT: vcvttps2uqqs 268435456(%esp,%esi,8), %xmm2 +# INTEL: vcvttps2uqqs xmm2, qword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0x7d,0x08,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2uqqs 291(%edi,%eax,4), %xmm2 {%k7} +# INTEL: vcvttps2uqqs xmm2 {k7}, qword ptr [edi + 4*eax + 291] +0x62,0xf5,0x7d,0x0f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2uqqs (%eax){1to2}, %xmm2 +# INTEL: vcvttps2uqqs xmm2, dword ptr [eax]{1to2} +0x62,0xf5,0x7d,0x18,0x6c,0x10 + +# ATT: vcvttps2uqqs -256(,%ebp,2), %xmm2 +# INTEL: vcvttps2uqqs xmm2, qword ptr [2*ebp - 256] +0x62,0xf5,0x7d,0x08,0x6c,0x14,0x6d,0x00,0xff,0xff,0xff + +# ATT: vcvttps2uqqs 1016(%ecx), %xmm2 {%k7} {z} +# INTEL: vcvttps2uqqs xmm2 {k7} {z}, qword ptr [ecx + 1016] +0x62,0xf5,0x7d,0x8f,0x6c,0x51,0x7f + +# ATT: vcvttps2uqqs -512(%edx){1to2}, %xmm2 {%k7} {z} +# INTEL: vcvttps2uqqs xmm2 {k7} {z}, dword ptr [edx - 512]{1to2} +0x62,0xf5,0x7d,0x9f,0x6c,0x52,0x80 + +# ATT: vcvttps2uqqs 268435456(%esp,%esi,8), %ymm2 +# INTEL: vcvttps2uqqs ymm2, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0x7d,0x28,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2uqqs 291(%edi,%eax,4), %ymm2 {%k7} +# INTEL: vcvttps2uqqs ymm2 {k7}, xmmword ptr [edi + 4*eax + 291] +0x62,0xf5,0x7d,0x2f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2uqqs (%eax){1to4}, %ymm2 +# INTEL: vcvttps2uqqs ymm2, dword ptr [eax]{1to4} +0x62,0xf5,0x7d,0x38,0x6c,0x10 + +# ATT: vcvttps2uqqs -512(,%ebp,2), %ymm2 +# INTEL: vcvttps2uqqs ymm2, xmmword ptr [2*ebp - 512] +0x62,0xf5,0x7d,0x28,0x6c,0x14,0x6d,0x00,0xfe,0xff,0xff + +# ATT: vcvttps2uqqs 2032(%ecx), %ymm2 {%k7} {z} +# INTEL: vcvttps2uqqs ymm2 {k7} {z}, xmmword ptr [ecx + 2032] +0x62,0xf5,0x7d,0xaf,0x6c,0x51,0x7f + +# ATT: vcvttps2uqqs -512(%edx){1to4}, %ymm2 {%k7} {z} +# INTEL: vcvttps2uqqs ymm2 {k7} {z}, dword ptr [edx - 512]{1to4} +0x62,0xf5,0x7d,0xbf,0x6c,0x52,0x80 + +# ATT: vcvttps2uqqs 268435456(%esp,%esi,8), %zmm2 +# INTEL: vcvttps2uqqs zmm2, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0x7d,0x48,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2uqqs 291(%edi,%eax,4), %zmm2 {%k7} +# INTEL: vcvttps2uqqs zmm2 {k7}, ymmword ptr [edi + 4*eax + 291] +0x62,0xf5,0x7d,0x4f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2uqqs (%eax){1to8}, %zmm2 +# INTEL: vcvttps2uqqs zmm2, dword ptr [eax]{1to8} +0x62,0xf5,0x7d,0x58,0x6c,0x10 + +# ATT: vcvttps2uqqs -1024(,%ebp,2), %zmm2 +# INTEL: vcvttps2uqqs zmm2, ymmword ptr [2*ebp - 1024] +0x62,0xf5,0x7d,0x48,0x6c,0x14,0x6d,0x00,0xfc,0xff,0xff + +# ATT: vcvttps2uqqs 4064(%ecx), %zmm2 {%k7} {z} +# INTEL: vcvttps2uqqs zmm2 {k7} {z}, ymmword ptr [ecx + 4064] +0x62,0xf5,0x7d,0xcf,0x6c,0x51,0x7f + +# ATT: vcvttps2uqqs -512(%edx){1to8}, %zmm2 {%k7} {z} +# INTEL: vcvttps2uqqs zmm2 {k7} {z}, dword ptr [edx - 512]{1to8} +0x62,0xf5,0x7d,0xdf,0x6c,0x52,0x80 + +# ATT: vcvttsd2sis %xmm2, %ecx +# INTEL: vcvttsd2sis ecx, xmm2 +0x62,0xf5,0x7f,0x08,0x6d,0xca + +# ATT: vcvttsd2sis {sae}, %xmm2, %ecx +# INTEL: vcvttsd2sis ecx, xmm2, {sae} +0x62,0xf5,0x7f,0x18,0x6d,0xca + +# ATT: vcvttsd2sis 268435456(%esp,%esi,8), %ecx +# INTEL: vcvttsd2sis ecx, qword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0x7f,0x08,0x6d,0x8c,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttsd2sis 291(%edi,%eax,4), %ecx +# INTEL: vcvttsd2sis 
ecx, qword ptr [edi + 4*eax + 291] +0x62,0xf5,0x7f,0x08,0x6d,0x8c,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttsd2sis (%eax), %ecx +# INTEL: vcvttsd2sis ecx, qword ptr [eax] +0x62,0xf5,0x7f,0x08,0x6d,0x08 + +# ATT: vcvttsd2sis -256(,%ebp,2), %ecx +# INTEL: vcvttsd2sis ecx, qword ptr [2*ebp - 256] +0x62,0xf5,0x7f,0x08,0x6d,0x0c,0x6d,0x00,0xff,0xff,0xff + +# ATT: vcvttsd2sis 1016(%ecx), %ecx +# INTEL: vcvttsd2sis ecx, qword ptr [ecx + 1016] +0x62,0xf5,0x7f,0x08,0x6d,0x49,0x7f + +# ATT: vcvttsd2sis -1024(%edx), %ecx +# INTEL: vcvttsd2sis ecx, qword ptr [edx - 1024] +0x62,0xf5,0x7f,0x08,0x6d,0x4a,0x80 + +# ATT: vcvttsd2usis %xmm2, %ecx +# INTEL: vcvttsd2usis ecx, xmm2 +0x62,0xf5,0x7f,0x08,0x6c,0xca + +# ATT: vcvttsd2usis {sae}, %xmm2, %ecx +# INTEL: vcvttsd2usis ecx, xmm2, {sae} +0x62,0xf5,0x7f,0x18,0x6c,0xca + +# ATT: vcvttsd2usis 268435456(%esp,%esi,8), %ecx +# INTEL: vcvttsd2usis ecx, qword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0x7f,0x08,0x6c,0x8c,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttsd2usis 291(%edi,%eax,4), %ecx +# INTEL: vcvttsd2usis ecx, qword ptr [edi + 4*eax + 291] +0x62,0xf5,0x7f,0x08,0x6c,0x8c,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttsd2usis (%eax), %ecx +# INTEL: vcvttsd2usis ecx, qword ptr [eax] +0x62,0xf5,0x7f,0x08,0x6c,0x08 + +# ATT: vcvttsd2usis -256(,%ebp,2), %ecx +# INTEL: vcvttsd2usis ecx, qword ptr [2*ebp - 256] +0x62,0xf5,0x7f,0x08,0x6c,0x0c,0x6d,0x00,0xff,0xff,0xff + +# ATT: vcvttsd2usis 1016(%ecx), %ecx +# INTEL: vcvttsd2usis ecx, qword ptr [ecx + 1016] +0x62,0xf5,0x7f,0x08,0x6c,0x49,0x7f + +# ATT: vcvttsd2usis -1024(%edx), %ecx +# INTEL: vcvttsd2usis ecx, qword ptr [edx - 1024] +0x62,0xf5,0x7f,0x08,0x6c,0x4a,0x80 + +# ATT: vcvttss2sis %xmm2, %ecx +# INTEL: vcvttss2sis ecx, xmm2 +0x62,0xf5,0x7e,0x08,0x6d,0xca + +# ATT: vcvttss2sis {sae}, %xmm2, %ecx +# INTEL: vcvttss2sis ecx, xmm2, {sae} +0x62,0xf5,0x7e,0x18,0x6d,0xca + +# ATT: vcvttss2sis 268435456(%esp,%esi,8), %ecx +# INTEL: vcvttss2sis ecx, dword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0x7e,0x08,0x6d,0x8c,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttss2sis 291(%edi,%eax,4), %ecx +# INTEL: vcvttss2sis ecx, dword ptr [edi + 4*eax + 291] +0x62,0xf5,0x7e,0x08,0x6d,0x8c,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttss2sis (%eax), %ecx +# INTEL: vcvttss2sis ecx, dword ptr [eax] +0x62,0xf5,0x7e,0x08,0x6d,0x08 + +# ATT: vcvttss2sis -128(,%ebp,2), %ecx +# INTEL: vcvttss2sis ecx, dword ptr [2*ebp - 128] +0x62,0xf5,0x7e,0x08,0x6d,0x0c,0x6d,0x80,0xff,0xff,0xff + +# ATT: vcvttss2sis 508(%ecx), %ecx +# INTEL: vcvttss2sis ecx, dword ptr [ecx + 508] +0x62,0xf5,0x7e,0x08,0x6d,0x49,0x7f + +# ATT: vcvttss2sis -512(%edx), %ecx +# INTEL: vcvttss2sis ecx, dword ptr [edx - 512] +0x62,0xf5,0x7e,0x08,0x6d,0x4a,0x80 + +# ATT: vcvttss2usis %xmm2, %ecx +# INTEL: vcvttss2usis ecx, xmm2 +0x62,0xf5,0x7e,0x08,0x6c,0xca + +# ATT: vcvttss2usis {sae}, %xmm2, %ecx +# INTEL: vcvttss2usis ecx, xmm2, {sae} +0x62,0xf5,0x7e,0x18,0x6c,0xca + +# ATT: vcvttss2usis 268435456(%esp,%esi,8), %ecx +# INTEL: vcvttss2usis ecx, dword ptr [esp + 8*esi + 268435456] +0x62,0xf5,0x7e,0x08,0x6c,0x8c,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vcvttss2usis 291(%edi,%eax,4), %ecx +# INTEL: vcvttss2usis ecx, dword ptr [edi + 4*eax + 291] +0x62,0xf5,0x7e,0x08,0x6c,0x8c,0x87,0x23,0x01,0x00,0x00 + +# ATT: vcvttss2usis (%eax), %ecx +# INTEL: vcvttss2usis ecx, dword ptr [eax] +0x62,0xf5,0x7e,0x08,0x6c,0x08 + +# ATT: vcvttss2usis -128(,%ebp,2), %ecx +# INTEL: vcvttss2usis ecx, dword ptr [2*ebp - 128] +0x62,0xf5,0x7e,0x08,0x6c,0x0c,0x6d,0x80,0xff,0xff,0xff + +# ATT: vcvttss2usis 508(%ecx), %ecx +# 
INTEL: vcvttss2usis ecx, dword ptr [ecx + 508] +0x62,0xf5,0x7e,0x08,0x6c,0x49,0x7f + +# ATT: vcvttss2usis -512(%edx), %ecx +# INTEL: vcvttss2usis ecx, dword ptr [edx - 512] +0x62,0xf5,0x7e,0x08,0x6c,0x4a,0x80 + diff --git a/llvm/test/MC/Disassembler/X86/avx10.2-satcvtds-64.txt b/llvm/test/MC/Disassembler/X86/avx10.2-satcvtds-64.txt new file mode 100644 index 0000000..c0c3340 --- /dev/null +++ b/llvm/test/MC/Disassembler/X86/avx10.2-satcvtds-64.txt @@ -0,0 +1,1171 @@ +# RUN: llvm-mc --disassemble %s -triple=x86_64 | FileCheck %s --check-prefixes=ATT +# RUN: llvm-mc --disassemble %s -triple=x86_64 -x86-asm-syntax=intel --output-asm-variant=1 | FileCheck %s --check-prefixes=INTEL + +# ATT: vcvttpd2dqs %xmm23, %xmm22 +# INTEL: vcvttpd2dqs xmm22, xmm23 +0x62,0xa5,0xfc,0x08,0x6d,0xf7 + +# ATT: vcvttpd2dqs %xmm23, %xmm22 {%k7} +# INTEL: vcvttpd2dqs xmm22 {k7}, xmm23 +0x62,0xa5,0xfc,0x0f,0x6d,0xf7 + +# ATT: vcvttpd2dqs %xmm23, %xmm22 {%k7} {z} +# INTEL: vcvttpd2dqs xmm22 {k7} {z}, xmm23 +0x62,0xa5,0xfc,0x8f,0x6d,0xf7 + +# ATT: vcvttpd2dqs %ymm23, %xmm22 +# INTEL: vcvttpd2dqs xmm22, ymm23 +0x62,0xa5,0xfc,0x28,0x6d,0xf7 + +# ATT: vcvttpd2dqs {sae}, %ymm23, %xmm22 +# INTEL: vcvttpd2dqs xmm22, ymm23, {sae} +0x62,0xa5,0xf8,0x18,0x6d,0xf7 + +# ATT: vcvttpd2dqs %ymm23, %xmm22 {%k7} +# INTEL: vcvttpd2dqs xmm22 {k7}, ymm23 +0x62,0xa5,0xfc,0x2f,0x6d,0xf7 + +# ATT: vcvttpd2dqs {sae}, %ymm23, %xmm22 {%k7} {z} +# INTEL: vcvttpd2dqs xmm22 {k7} {z}, ymm23, {sae} +0x62,0xa5,0xf8,0x9f,0x6d,0xf7 + +# ATT: vcvttpd2dqs %zmm23, %ymm22 +# INTEL: vcvttpd2dqs ymm22, zmm23 +0x62,0xa5,0xfc,0x48,0x6d,0xf7 + +# ATT: vcvttpd2dqs {sae}, %zmm23, %ymm22 +# INTEL: vcvttpd2dqs ymm22, zmm23, {sae} +0x62,0xa5,0xfc,0x18,0x6d,0xf7 + +# ATT: vcvttpd2dqs %zmm23, %ymm22 {%k7} +# INTEL: vcvttpd2dqs ymm22 {k7}, zmm23 +0x62,0xa5,0xfc,0x4f,0x6d,0xf7 + +# ATT: vcvttpd2dqs {sae}, %zmm23, %ymm22 {%k7} {z} +# INTEL: vcvttpd2dqs ymm22 {k7} {z}, zmm23, {sae} +0x62,0xa5,0xfc,0x9f,0x6d,0xf7 + +# ATT: vcvttpd2dqsx 268435456(%rbp,%r14,8), %xmm22 +# INTEL: vcvttpd2dqs xmm22, xmmword ptr [rbp + 8*r14 + 268435456] +0x62,0xa5,0xfc,0x08,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttpd2dqsx 291(%r8,%rax,4), %xmm22 {%k7} +# INTEL: vcvttpd2dqs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291] +0x62,0xc5,0xfc,0x0f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttpd2dqs (%rip){1to2}, %xmm22 +# INTEL: vcvttpd2dqs xmm22, qword ptr [rip]{1to2} +0x62,0xe5,0xfc,0x18,0x6d,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttpd2dqsx -512(,%rbp,2), %xmm22 +# INTEL: vcvttpd2dqs xmm22, xmmword ptr [2*rbp - 512] +0x62,0xe5,0xfc,0x08,0x6d,0x34,0x6d,0x00,0xfe,0xff,0xff + +# ATT: vcvttpd2dqsx 2032(%rcx), %xmm22 {%k7} {z} +# INTEL: vcvttpd2dqs xmm22 {k7} {z}, xmmword ptr [rcx + 2032] +0x62,0xe5,0xfc,0x8f,0x6d,0x71,0x7f + +# ATT: vcvttpd2dqs -1024(%rdx){1to2}, %xmm22 {%k7} {z} +# INTEL: vcvttpd2dqs xmm22 {k7} {z}, qword ptr [rdx - 1024]{1to2} +0x62,0xe5,0xfc,0x9f,0x6d,0x72,0x80 + +# ATT: vcvttpd2dqs (%rip){1to4}, %xmm22 +# INTEL: vcvttpd2dqs xmm22, qword ptr [rip]{1to4} +0x62,0xe5,0xfc,0x38,0x6d,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttpd2dqsy -1024(,%rbp,2), %xmm22 +# INTEL: vcvttpd2dqs xmm22, ymmword ptr [2*rbp - 1024] +0x62,0xe5,0xfc,0x28,0x6d,0x34,0x6d,0x00,0xfc,0xff,0xff + +# ATT: vcvttpd2dqsy 4064(%rcx), %xmm22 {%k7} {z} +# INTEL: vcvttpd2dqs xmm22 {k7} {z}, ymmword ptr [rcx + 4064] +0x62,0xe5,0xfc,0xaf,0x6d,0x71,0x7f + +# ATT: vcvttpd2dqs -1024(%rdx){1to4}, %xmm22 {%k7} {z} +# INTEL: vcvttpd2dqs xmm22 {k7} {z}, qword ptr [rdx - 1024]{1to4} +0x62,0xe5,0xfc,0xbf,0x6d,0x72,0x80 + +# ATT: 
vcvttpd2dqs 268435456(%rbp,%r14,8), %ymm22 +# INTEL: vcvttpd2dqs ymm22, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0xa5,0xfc,0x48,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttpd2dqs 291(%r8,%rax,4), %ymm22 {%k7} +# INTEL: vcvttpd2dqs ymm22 {k7}, zmmword ptr [r8 + 4*rax + 291] +0x62,0xc5,0xfc,0x4f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttpd2dqs (%rip){1to8}, %ymm22 +# INTEL: vcvttpd2dqs ymm22, qword ptr [rip]{1to8} +0x62,0xe5,0xfc,0x58,0x6d,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttpd2dqs -2048(,%rbp,2), %ymm22 +# INTEL: vcvttpd2dqs ymm22, zmmword ptr [2*rbp - 2048] +0x62,0xe5,0xfc,0x48,0x6d,0x34,0x6d,0x00,0xf8,0xff,0xff + +# ATT: vcvttpd2dqs 8128(%rcx), %ymm22 {%k7} {z} +# INTEL: vcvttpd2dqs ymm22 {k7} {z}, zmmword ptr [rcx + 8128] +0x62,0xe5,0xfc,0xcf,0x6d,0x71,0x7f + +# ATT: vcvttpd2dqs -1024(%rdx){1to8}, %ymm22 {%k7} {z} +# INTEL: vcvttpd2dqs ymm22 {k7} {z}, qword ptr [rdx - 1024]{1to8} +0x62,0xe5,0xfc,0xdf,0x6d,0x72,0x80 + +# ATT: vcvttpd2qqs %xmm23, %xmm22 +# INTEL: vcvttpd2qqs xmm22, xmm23 +0x62,0xa5,0xfd,0x08,0x6d,0xf7 + +# ATT: vcvttpd2qqs %xmm23, %xmm22 {%k7} +# INTEL: vcvttpd2qqs xmm22 {k7}, xmm23 +0x62,0xa5,0xfd,0x0f,0x6d,0xf7 + +# ATT: vcvttpd2qqs %xmm23, %xmm22 {%k7} {z} +# INTEL: vcvttpd2qqs xmm22 {k7} {z}, xmm23 +0x62,0xa5,0xfd,0x8f,0x6d,0xf7 + +# ATT: vcvttpd2qqs %ymm23, %ymm22 +# INTEL: vcvttpd2qqs ymm22, ymm23 +0x62,0xa5,0xfd,0x28,0x6d,0xf7 + +# ATT: vcvttpd2qqs {sae}, %ymm23, %ymm22 +# INTEL: vcvttpd2qqs ymm22, ymm23, {sae} +0x62,0xa5,0xf9,0x18,0x6d,0xf7 + +# ATT: vcvttpd2qqs %ymm23, %ymm22 {%k7} +# INTEL: vcvttpd2qqs ymm22 {k7}, ymm23 +0x62,0xa5,0xfd,0x2f,0x6d,0xf7 + +# ATT: vcvttpd2qqs {sae}, %ymm23, %ymm22 {%k7} {z} +# INTEL: vcvttpd2qqs ymm22 {k7} {z}, ymm23, {sae} +0x62,0xa5,0xf9,0x9f,0x6d,0xf7 + +# ATT: vcvttpd2qqs %zmm23, %zmm22 +# INTEL: vcvttpd2qqs zmm22, zmm23 +0x62,0xa5,0xfd,0x48,0x6d,0xf7 + +# ATT: vcvttpd2qqs {sae}, %zmm23, %zmm22 +# INTEL: vcvttpd2qqs zmm22, zmm23, {sae} +0x62,0xa5,0xfd,0x18,0x6d,0xf7 + +# ATT: vcvttpd2qqs %zmm23, %zmm22 {%k7} +# INTEL: vcvttpd2qqs zmm22 {k7}, zmm23 +0x62,0xa5,0xfd,0x4f,0x6d,0xf7 + +# ATT: vcvttpd2qqs {sae}, %zmm23, %zmm22 {%k7} {z} +# INTEL: vcvttpd2qqs zmm22 {k7} {z}, zmm23, {sae} +0x62,0xa5,0xfd,0x9f,0x6d,0xf7 + +# ATT: vcvttpd2qqs 268435456(%rbp,%r14,8), %xmm22 +# INTEL: vcvttpd2qqs xmm22, xmmword ptr [rbp + 8*r14 + 268435456] +0x62,0xa5,0xfd,0x08,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttpd2qqs 291(%r8,%rax,4), %xmm22 {%k7} +# INTEL: vcvttpd2qqs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291] +0x62,0xc5,0xfd,0x0f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttpd2qqs (%rip){1to2}, %xmm22 +# INTEL: vcvttpd2qqs xmm22, qword ptr [rip]{1to2} +0x62,0xe5,0xfd,0x18,0x6d,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttpd2qqs -512(,%rbp,2), %xmm22 +# INTEL: vcvttpd2qqs xmm22, xmmword ptr [2*rbp - 512] +0x62,0xe5,0xfd,0x08,0x6d,0x34,0x6d,0x00,0xfe,0xff,0xff + +# ATT: vcvttpd2qqs 2032(%rcx), %xmm22 {%k7} {z} +# INTEL: vcvttpd2qqs xmm22 {k7} {z}, xmmword ptr [rcx + 2032] +0x62,0xe5,0xfd,0x8f,0x6d,0x71,0x7f + +# ATT: vcvttpd2qqs -1024(%rdx){1to2}, %xmm22 {%k7} {z} +# INTEL: vcvttpd2qqs xmm22 {k7} {z}, qword ptr [rdx - 1024]{1to2} +0x62,0xe5,0xfd,0x9f,0x6d,0x72,0x80 + +# ATT: vcvttpd2qqs 268435456(%rbp,%r14,8), %ymm22 +# INTEL: vcvttpd2qqs ymm22, ymmword ptr [rbp + 8*r14 + 268435456] +0x62,0xa5,0xfd,0x28,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttpd2qqs 291(%r8,%rax,4), %ymm22 {%k7} +# INTEL: vcvttpd2qqs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291] +0x62,0xc5,0xfd,0x2f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: 
vcvttpd2qqs (%rip){1to4}, %ymm22 +# INTEL: vcvttpd2qqs ymm22, qword ptr [rip]{1to4} +0x62,0xe5,0xfd,0x38,0x6d,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttpd2qqs -1024(,%rbp,2), %ymm22 +# INTEL: vcvttpd2qqs ymm22, ymmword ptr [2*rbp - 1024] +0x62,0xe5,0xfd,0x28,0x6d,0x34,0x6d,0x00,0xfc,0xff,0xff + +# ATT: vcvttpd2qqs 4064(%rcx), %ymm22 {%k7} {z} +# INTEL: vcvttpd2qqs ymm22 {k7} {z}, ymmword ptr [rcx + 4064] +0x62,0xe5,0xfd,0xaf,0x6d,0x71,0x7f + +# ATT: vcvttpd2qqs -1024(%rdx){1to4}, %ymm22 {%k7} {z} +# INTEL: vcvttpd2qqs ymm22 {k7} {z}, qword ptr [rdx - 1024]{1to4} +0x62,0xe5,0xfd,0xbf,0x6d,0x72,0x80 + +# ATT: vcvttpd2qqs 268435456(%rbp,%r14,8), %zmm22 +# INTEL: vcvttpd2qqs zmm22, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0xa5,0xfd,0x48,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttpd2qqs 291(%r8,%rax,4), %zmm22 {%k7} +# INTEL: vcvttpd2qqs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291] +0x62,0xc5,0xfd,0x4f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttpd2qqs (%rip){1to8}, %zmm22 +# INTEL: vcvttpd2qqs zmm22, qword ptr [rip]{1to8} +0x62,0xe5,0xfd,0x58,0x6d,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttpd2qqs -2048(,%rbp,2), %zmm22 +# INTEL: vcvttpd2qqs zmm22, zmmword ptr [2*rbp - 2048] +0x62,0xe5,0xfd,0x48,0x6d,0x34,0x6d,0x00,0xf8,0xff,0xff + +# ATT: vcvttpd2qqs 8128(%rcx), %zmm22 {%k7} {z} +# INTEL: vcvttpd2qqs zmm22 {k7} {z}, zmmword ptr [rcx + 8128] +0x62,0xe5,0xfd,0xcf,0x6d,0x71,0x7f + +# ATT: vcvttpd2qqs -1024(%rdx){1to8}, %zmm22 {%k7} {z} +# INTEL: vcvttpd2qqs zmm22 {k7} {z}, qword ptr [rdx - 1024]{1to8} +0x62,0xe5,0xfd,0xdf,0x6d,0x72,0x80 + +# ATT: vcvttpd2udqs %xmm23, %xmm22 +# INTEL: vcvttpd2udqs xmm22, xmm23 +0x62,0xa5,0xfc,0x08,0x6c,0xf7 + +# ATT: vcvttpd2udqs %xmm23, %xmm22 {%k7} +# INTEL: vcvttpd2udqs xmm22 {k7}, xmm23 +0x62,0xa5,0xfc,0x0f,0x6c,0xf7 + +# ATT: vcvttpd2udqs %xmm23, %xmm22 {%k7} {z} +# INTEL: vcvttpd2udqs xmm22 {k7} {z}, xmm23 +0x62,0xa5,0xfc,0x8f,0x6c,0xf7 + +# ATT: vcvttpd2udqs %ymm23, %xmm22 +# INTEL: vcvttpd2udqs xmm22, ymm23 +0x62,0xa5,0xfc,0x28,0x6c,0xf7 + +# ATT: vcvttpd2udqs {sae}, %ymm23, %xmm22 +# INTEL: vcvttpd2udqs xmm22, ymm23, {sae} +0x62,0xa5,0xf8,0x18,0x6c,0xf7 + +# ATT: vcvttpd2udqs %ymm23, %xmm22 {%k7} +# INTEL: vcvttpd2udqs xmm22 {k7}, ymm23 +0x62,0xa5,0xfc,0x2f,0x6c,0xf7 + +# ATT: vcvttpd2udqs {sae}, %ymm23, %xmm22 {%k7} {z} +# INTEL: vcvttpd2udqs xmm22 {k7} {z}, ymm23, {sae} +0x62,0xa5,0xf8,0x9f,0x6c,0xf7 + +# ATT: vcvttpd2udqs %zmm23, %ymm22 +# INTEL: vcvttpd2udqs ymm22, zmm23 +0x62,0xa5,0xfc,0x48,0x6c,0xf7 + +# ATT: vcvttpd2udqs {sae}, %zmm23, %ymm22 +# INTEL: vcvttpd2udqs ymm22, zmm23, {sae} +0x62,0xa5,0xfc,0x18,0x6c,0xf7 + +# ATT: vcvttpd2udqs %zmm23, %ymm22 {%k7} +# INTEL: vcvttpd2udqs ymm22 {k7}, zmm23 +0x62,0xa5,0xfc,0x4f,0x6c,0xf7 + +# ATT: vcvttpd2udqs {sae}, %zmm23, %ymm22 {%k7} {z} +# INTEL: vcvttpd2udqs ymm22 {k7} {z}, zmm23, {sae} +0x62,0xa5,0xfc,0x9f,0x6c,0xf7 + +# ATT: vcvttpd2udqsx 268435456(%rbp,%r14,8), %xmm22 +# INTEL: vcvttpd2udqs xmm22, xmmword ptr [rbp + 8*r14 + 268435456] +0x62,0xa5,0xfc,0x08,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttpd2udqsx 291(%r8,%rax,4), %xmm22 {%k7} +# INTEL: vcvttpd2udqs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291] +0x62,0xc5,0xfc,0x0f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttpd2udqs (%rip){1to2}, %xmm22 +# INTEL: vcvttpd2udqs xmm22, qword ptr [rip]{1to2} +0x62,0xe5,0xfc,0x18,0x6c,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttpd2udqsx -512(,%rbp,2), %xmm22 +# INTEL: vcvttpd2udqs xmm22, xmmword ptr [2*rbp - 512] +0x62,0xe5,0xfc,0x08,0x6c,0x34,0x6d,0x00,0xfe,0xff,0xff + +# ATT: vcvttpd2udqsx 
2032(%rcx), %xmm22 {%k7} {z} +# INTEL: vcvttpd2udqs xmm22 {k7} {z}, xmmword ptr [rcx + 2032] +0x62,0xe5,0xfc,0x8f,0x6c,0x71,0x7f + +# ATT: vcvttpd2udqs -1024(%rdx){1to2}, %xmm22 {%k7} {z} +# INTEL: vcvttpd2udqs xmm22 {k7} {z}, qword ptr [rdx - 1024]{1to2} +0x62,0xe5,0xfc,0x9f,0x6c,0x72,0x80 + +# ATT: vcvttpd2udqs (%rip){1to4}, %xmm22 +# INTEL: vcvttpd2udqs xmm22, qword ptr [rip]{1to4} +0x62,0xe5,0xfc,0x38,0x6c,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttpd2udqsy -1024(,%rbp,2), %xmm22 +# INTEL: vcvttpd2udqs xmm22, ymmword ptr [2*rbp - 1024] +0x62,0xe5,0xfc,0x28,0x6c,0x34,0x6d,0x00,0xfc,0xff,0xff + +# ATT: vcvttpd2udqsy 4064(%rcx), %xmm22 {%k7} {z} +# INTEL: vcvttpd2udqs xmm22 {k7} {z}, ymmword ptr [rcx + 4064] +0x62,0xe5,0xfc,0xaf,0x6c,0x71,0x7f + +# ATT: vcvttpd2udqs -1024(%rdx){1to4}, %xmm22 {%k7} {z} +# INTEL: vcvttpd2udqs xmm22 {k7} {z}, qword ptr [rdx - 1024]{1to4} +0x62,0xe5,0xfc,0xbf,0x6c,0x72,0x80 + +# ATT: vcvttpd2udqs 268435456(%rbp,%r14,8), %ymm22 +# INTEL: vcvttpd2udqs ymm22, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0xa5,0xfc,0x48,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttpd2udqs 291(%r8,%rax,4), %ymm22 {%k7} +# INTEL: vcvttpd2udqs ymm22 {k7}, zmmword ptr [r8 + 4*rax + 291] +0x62,0xc5,0xfc,0x4f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttpd2udqs (%rip){1to8}, %ymm22 +# INTEL: vcvttpd2udqs ymm22, qword ptr [rip]{1to8} +0x62,0xe5,0xfc,0x58,0x6c,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttpd2udqs -2048(,%rbp,2), %ymm22 +# INTEL: vcvttpd2udqs ymm22, zmmword ptr [2*rbp - 2048] +0x62,0xe5,0xfc,0x48,0x6c,0x34,0x6d,0x00,0xf8,0xff,0xff + +# ATT: vcvttpd2udqs 8128(%rcx), %ymm22 {%k7} {z} +# INTEL: vcvttpd2udqs ymm22 {k7} {z}, zmmword ptr [rcx + 8128] +0x62,0xe5,0xfc,0xcf,0x6c,0x71,0x7f + +# ATT: vcvttpd2udqs -1024(%rdx){1to8}, %ymm22 {%k7} {z} +# INTEL: vcvttpd2udqs ymm22 {k7} {z}, qword ptr [rdx - 1024]{1to8} +0x62,0xe5,0xfc,0xdf,0x6c,0x72,0x80 + +# ATT: vcvttpd2uqqs %xmm23, %xmm22 +# INTEL: vcvttpd2uqqs xmm22, xmm23 +0x62,0xa5,0xfd,0x08,0x6c,0xf7 + +# ATT: vcvttpd2uqqs %xmm23, %xmm22 {%k7} +# INTEL: vcvttpd2uqqs xmm22 {k7}, xmm23 +0x62,0xa5,0xfd,0x0f,0x6c,0xf7 + +# ATT: vcvttpd2uqqs %xmm23, %xmm22 {%k7} {z} +# INTEL: vcvttpd2uqqs xmm22 {k7} {z}, xmm23 +0x62,0xa5,0xfd,0x8f,0x6c,0xf7 + +# ATT: vcvttpd2uqqs %ymm23, %ymm22 +# INTEL: vcvttpd2uqqs ymm22, ymm23 +0x62,0xa5,0xfd,0x28,0x6c,0xf7 + +# ATT: vcvttpd2uqqs {sae}, %ymm23, %ymm22 +# INTEL: vcvttpd2uqqs ymm22, ymm23, {sae} +0x62,0xa5,0xf9,0x18,0x6c,0xf7 + +# ATT: vcvttpd2uqqs %ymm23, %ymm22 {%k7} +# INTEL: vcvttpd2uqqs ymm22 {k7}, ymm23 +0x62,0xa5,0xfd,0x2f,0x6c,0xf7 + +# ATT: vcvttpd2uqqs {sae}, %ymm23, %ymm22 {%k7} {z} +# INTEL: vcvttpd2uqqs ymm22 {k7} {z}, ymm23, {sae} +0x62,0xa5,0xf9,0x9f,0x6c,0xf7 + +# ATT: vcvttpd2uqqs %zmm23, %zmm22 +# INTEL: vcvttpd2uqqs zmm22, zmm23 +0x62,0xa5,0xfd,0x48,0x6c,0xf7 + +# ATT: vcvttpd2uqqs {sae}, %zmm23, %zmm22 +# INTEL: vcvttpd2uqqs zmm22, zmm23, {sae} +0x62,0xa5,0xfd,0x18,0x6c,0xf7 + +# ATT: vcvttpd2uqqs %zmm23, %zmm22 {%k7} +# INTEL: vcvttpd2uqqs zmm22 {k7}, zmm23 +0x62,0xa5,0xfd,0x4f,0x6c,0xf7 + +# ATT: vcvttpd2uqqs {sae}, %zmm23, %zmm22 {%k7} {z} +# INTEL: vcvttpd2uqqs zmm22 {k7} {z}, zmm23, {sae} +0x62,0xa5,0xfd,0x9f,0x6c,0xf7 + +# ATT: vcvttpd2uqqs 268435456(%rbp,%r14,8), %xmm22 +# INTEL: vcvttpd2uqqs xmm22, xmmword ptr [rbp + 8*r14 + 268435456] +0x62,0xa5,0xfd,0x08,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttpd2uqqs 291(%r8,%rax,4), %xmm22 {%k7} +# INTEL: vcvttpd2uqqs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291] +0x62,0xc5,0xfd,0x0f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: 
vcvttpd2uqqs (%rip){1to2}, %xmm22 +# INTEL: vcvttpd2uqqs xmm22, qword ptr [rip]{1to2} +0x62,0xe5,0xfd,0x18,0x6c,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttpd2uqqs -512(,%rbp,2), %xmm22 +# INTEL: vcvttpd2uqqs xmm22, xmmword ptr [2*rbp - 512] +0x62,0xe5,0xfd,0x08,0x6c,0x34,0x6d,0x00,0xfe,0xff,0xff + +# ATT: vcvttpd2uqqs 2032(%rcx), %xmm22 {%k7} {z} +# INTEL: vcvttpd2uqqs xmm22 {k7} {z}, xmmword ptr [rcx + 2032] +0x62,0xe5,0xfd,0x8f,0x6c,0x71,0x7f + +# ATT: vcvttpd2uqqs -1024(%rdx){1to2}, %xmm22 {%k7} {z} +# INTEL: vcvttpd2uqqs xmm22 {k7} {z}, qword ptr [rdx - 1024]{1to2} +0x62,0xe5,0xfd,0x9f,0x6c,0x72,0x80 + +# ATT: vcvttpd2uqqs 268435456(%rbp,%r14,8), %ymm22 +# INTEL: vcvttpd2uqqs ymm22, ymmword ptr [rbp + 8*r14 + 268435456] +0x62,0xa5,0xfd,0x28,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttpd2uqqs 291(%r8,%rax,4), %ymm22 {%k7} +# INTEL: vcvttpd2uqqs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291] +0x62,0xc5,0xfd,0x2f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttpd2uqqs (%rip){1to4}, %ymm22 +# INTEL: vcvttpd2uqqs ymm22, qword ptr [rip]{1to4} +0x62,0xe5,0xfd,0x38,0x6c,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttpd2uqqs -1024(,%rbp,2), %ymm22 +# INTEL: vcvttpd2uqqs ymm22, ymmword ptr [2*rbp - 1024] +0x62,0xe5,0xfd,0x28,0x6c,0x34,0x6d,0x00,0xfc,0xff,0xff + +# ATT: vcvttpd2uqqs 4064(%rcx), %ymm22 {%k7} {z} +# INTEL: vcvttpd2uqqs ymm22 {k7} {z}, ymmword ptr [rcx + 4064] +0x62,0xe5,0xfd,0xaf,0x6c,0x71,0x7f + +# ATT: vcvttpd2uqqs -1024(%rdx){1to4}, %ymm22 {%k7} {z} +# INTEL: vcvttpd2uqqs ymm22 {k7} {z}, qword ptr [rdx - 1024]{1to4} +0x62,0xe5,0xfd,0xbf,0x6c,0x72,0x80 + +# ATT: vcvttpd2uqqs 268435456(%rbp,%r14,8), %zmm22 +# INTEL: vcvttpd2uqqs zmm22, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0xa5,0xfd,0x48,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttpd2uqqs 291(%r8,%rax,4), %zmm22 {%k7} +# INTEL: vcvttpd2uqqs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291] +0x62,0xc5,0xfd,0x4f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttpd2uqqs (%rip){1to8}, %zmm22 +# INTEL: vcvttpd2uqqs zmm22, qword ptr [rip]{1to8} +0x62,0xe5,0xfd,0x58,0x6c,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttpd2uqqs -2048(,%rbp,2), %zmm22 +# INTEL: vcvttpd2uqqs zmm22, zmmword ptr [2*rbp - 2048] +0x62,0xe5,0xfd,0x48,0x6c,0x34,0x6d,0x00,0xf8,0xff,0xff + +# ATT: vcvttpd2uqqs 8128(%rcx), %zmm22 {%k7} {z} +# INTEL: vcvttpd2uqqs zmm22 {k7} {z}, zmmword ptr [rcx + 8128] +0x62,0xe5,0xfd,0xcf,0x6c,0x71,0x7f + +# ATT: vcvttpd2uqqs -1024(%rdx){1to8}, %zmm22 {%k7} {z} +# INTEL: vcvttpd2uqqs zmm22 {k7} {z}, qword ptr [rdx - 1024]{1to8} +0x62,0xe5,0xfd,0xdf,0x6c,0x72,0x80 + +# ATT: vcvttps2dqs %xmm23, %xmm22 +# INTEL: vcvttps2dqs xmm22, xmm23 +0x62,0xa5,0x7c,0x08,0x6d,0xf7 + +# ATT: vcvttps2dqs %xmm23, %xmm22 {%k7} +# INTEL: vcvttps2dqs xmm22 {k7}, xmm23 +0x62,0xa5,0x7c,0x0f,0x6d,0xf7 + +# ATT: vcvttps2dqs %xmm23, %xmm22 {%k7} {z} +# INTEL: vcvttps2dqs xmm22 {k7} {z}, xmm23 +0x62,0xa5,0x7c,0x8f,0x6d,0xf7 + +# ATT: vcvttps2dqs %ymm23, %ymm22 +# INTEL: vcvttps2dqs ymm22, ymm23 +0x62,0xa5,0x7c,0x28,0x6d,0xf7 + +# ATT: vcvttps2dqs {sae}, %ymm23, %ymm22 +# INTEL: vcvttps2dqs ymm22, ymm23, {sae} +0x62,0xa5,0x78,0x18,0x6d,0xf7 + +# ATT: vcvttps2dqs %ymm23, %ymm22 {%k7} +# INTEL: vcvttps2dqs ymm22 {k7}, ymm23 +0x62,0xa5,0x7c,0x2f,0x6d,0xf7 + +# ATT: vcvttps2dqs {sae}, %ymm23, %ymm22 {%k7} {z} +# INTEL: vcvttps2dqs ymm22 {k7} {z}, ymm23, {sae} +0x62,0xa5,0x78,0x9f,0x6d,0xf7 + +# ATT: vcvttps2dqs %zmm23, %zmm22 +# INTEL: vcvttps2dqs zmm22, zmm23 +0x62,0xa5,0x7c,0x48,0x6d,0xf7 + +# ATT: vcvttps2dqs {sae}, %zmm23, %zmm22 +# INTEL: vcvttps2dqs zmm22, zmm23, {sae} 
+0x62,0xa5,0x7c,0x18,0x6d,0xf7 + +# ATT: vcvttps2dqs %zmm23, %zmm22 {%k7} +# INTEL: vcvttps2dqs zmm22 {k7}, zmm23 +0x62,0xa5,0x7c,0x4f,0x6d,0xf7 + +# ATT: vcvttps2dqs {sae}, %zmm23, %zmm22 {%k7} {z} +# INTEL: vcvttps2dqs zmm22 {k7} {z}, zmm23, {sae} +0x62,0xa5,0x7c,0x9f,0x6d,0xf7 + +# ATT: vcvttps2dqs 268435456(%rbp,%r14,8), %xmm22 +# INTEL: vcvttps2dqs xmm22, xmmword ptr [rbp + 8*r14 + 268435456] +0x62,0xa5,0x7c,0x08,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2dqs 291(%r8,%rax,4), %xmm22 {%k7} +# INTEL: vcvttps2dqs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291] +0x62,0xc5,0x7c,0x0f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2dqs (%rip){1to4}, %xmm22 +# INTEL: vcvttps2dqs xmm22, dword ptr [rip]{1to4} +0x62,0xe5,0x7c,0x18,0x6d,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttps2dqs -512(,%rbp,2), %xmm22 +# INTEL: vcvttps2dqs xmm22, xmmword ptr [2*rbp - 512] +0x62,0xe5,0x7c,0x08,0x6d,0x34,0x6d,0x00,0xfe,0xff,0xff + +# ATT: vcvttps2dqs 2032(%rcx), %xmm22 {%k7} {z} +# INTEL: vcvttps2dqs xmm22 {k7} {z}, xmmword ptr [rcx + 2032] +0x62,0xe5,0x7c,0x8f,0x6d,0x71,0x7f + +# ATT: vcvttps2dqs -512(%rdx){1to4}, %xmm22 {%k7} {z} +# INTEL: vcvttps2dqs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to4} +0x62,0xe5,0x7c,0x9f,0x6d,0x72,0x80 + +# ATT: vcvttps2dqs 268435456(%rbp,%r14,8), %ymm22 +# INTEL: vcvttps2dqs ymm22, ymmword ptr [rbp + 8*r14 + 268435456] +0x62,0xa5,0x7c,0x28,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2dqs 291(%r8,%rax,4), %ymm22 {%k7} +# INTEL: vcvttps2dqs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291] +0x62,0xc5,0x7c,0x2f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2dqs (%rip){1to8}, %ymm22 +# INTEL: vcvttps2dqs ymm22, dword ptr [rip]{1to8} +0x62,0xe5,0x7c,0x38,0x6d,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttps2dqs -1024(,%rbp,2), %ymm22 +# INTEL: vcvttps2dqs ymm22, ymmword ptr [2*rbp - 1024] +0x62,0xe5,0x7c,0x28,0x6d,0x34,0x6d,0x00,0xfc,0xff,0xff + +# ATT: vcvttps2dqs 4064(%rcx), %ymm22 {%k7} {z} +# INTEL: vcvttps2dqs ymm22 {k7} {z}, ymmword ptr [rcx + 4064] +0x62,0xe5,0x7c,0xaf,0x6d,0x71,0x7f + +# ATT: vcvttps2dqs -512(%rdx){1to8}, %ymm22 {%k7} {z} +# INTEL: vcvttps2dqs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to8} +0x62,0xe5,0x7c,0xbf,0x6d,0x72,0x80 + +# ATT: vcvttps2dqs 268435456(%rbp,%r14,8), %zmm22 +# INTEL: vcvttps2dqs zmm22, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0xa5,0x7c,0x48,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2dqs 291(%r8,%rax,4), %zmm22 {%k7} +# INTEL: vcvttps2dqs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291] +0x62,0xc5,0x7c,0x4f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2dqs (%rip){1to16}, %zmm22 +# INTEL: vcvttps2dqs zmm22, dword ptr [rip]{1to16} +0x62,0xe5,0x7c,0x58,0x6d,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttps2dqs -2048(,%rbp,2), %zmm22 +# INTEL: vcvttps2dqs zmm22, zmmword ptr [2*rbp - 2048] +0x62,0xe5,0x7c,0x48,0x6d,0x34,0x6d,0x00,0xf8,0xff,0xff + +# ATT: vcvttps2dqs 8128(%rcx), %zmm22 {%k7} {z} +# INTEL: vcvttps2dqs zmm22 {k7} {z}, zmmword ptr [rcx + 8128] +0x62,0xe5,0x7c,0xcf,0x6d,0x71,0x7f + +# ATT: vcvttps2dqs -512(%rdx){1to16}, %zmm22 {%k7} {z} +# INTEL: vcvttps2dqs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to16} +0x62,0xe5,0x7c,0xdf,0x6d,0x72,0x80 + +# ATT: vcvttps2qqs %xmm23, %xmm22 +# INTEL: vcvttps2qqs xmm22, xmm23 +0x62,0xa5,0x7d,0x08,0x6d,0xf7 + +# ATT: vcvttps2qqs %xmm23, %xmm22 {%k7} +# INTEL: vcvttps2qqs xmm22 {k7}, xmm23 +0x62,0xa5,0x7d,0x0f,0x6d,0xf7 + +# ATT: vcvttps2qqs %xmm23, %xmm22 {%k7} {z} +# INTEL: vcvttps2qqs xmm22 {k7} {z}, xmm23 +0x62,0xa5,0x7d,0x8f,0x6d,0xf7 + +# ATT: vcvttps2qqs %xmm23, %ymm22 +# INTEL: 
vcvttps2qqs ymm22, xmm23 +0x62,0xa5,0x7d,0x28,0x6d,0xf7 + +# ATT: vcvttps2qqs {sae}, %xmm23, %ymm22 +# INTEL: vcvttps2qqs ymm22, xmm23, {sae} +0x62,0xa5,0x79,0x18,0x6d,0xf7 + +# ATT: vcvttps2qqs %xmm23, %ymm22 {%k7} +# INTEL: vcvttps2qqs ymm22 {k7}, xmm23 +0x62,0xa5,0x7d,0x2f,0x6d,0xf7 + +# ATT: vcvttps2qqs {sae}, %xmm23, %ymm22 {%k7} {z} +# INTEL: vcvttps2qqs ymm22 {k7} {z}, xmm23, {sae} +0x62,0xa5,0x79,0x9f,0x6d,0xf7 + +# ATT: vcvttps2qqs %ymm23, %zmm22 +# INTEL: vcvttps2qqs zmm22, ymm23 +0x62,0xa5,0x7d,0x48,0x6d,0xf7 + +# ATT: vcvttps2qqs {sae}, %ymm23, %zmm22 +# INTEL: vcvttps2qqs zmm22, ymm23, {sae} +0x62,0xa5,0x7d,0x18,0x6d,0xf7 + +# ATT: vcvttps2qqs %ymm23, %zmm22 {%k7} +# INTEL: vcvttps2qqs zmm22 {k7}, ymm23 +0x62,0xa5,0x7d,0x4f,0x6d,0xf7 + +# ATT: vcvttps2qqs {sae}, %ymm23, %zmm22 {%k7} {z} +# INTEL: vcvttps2qqs zmm22 {k7} {z}, ymm23, {sae} +0x62,0xa5,0x7d,0x9f,0x6d,0xf7 + +# ATT: vcvttps2qqs 268435456(%rbp,%r14,8), %xmm22 +# INTEL: vcvttps2qqs xmm22, qword ptr [rbp + 8*r14 + 268435456] +0x62,0xa5,0x7d,0x08,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2qqs 291(%r8,%rax,4), %xmm22 {%k7} +# INTEL: vcvttps2qqs xmm22 {k7}, qword ptr [r8 + 4*rax + 291] +0x62,0xc5,0x7d,0x0f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2qqs (%rip){1to2}, %xmm22 +# INTEL: vcvttps2qqs xmm22, dword ptr [rip]{1to2} +0x62,0xe5,0x7d,0x18,0x6d,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttps2qqs -256(,%rbp,2), %xmm22 +# INTEL: vcvttps2qqs xmm22, qword ptr [2*rbp - 256] +0x62,0xe5,0x7d,0x08,0x6d,0x34,0x6d,0x00,0xff,0xff,0xff + +# ATT: vcvttps2qqs 1016(%rcx), %xmm22 {%k7} {z} +# INTEL: vcvttps2qqs xmm22 {k7} {z}, qword ptr [rcx + 1016] +0x62,0xe5,0x7d,0x8f,0x6d,0x71,0x7f + +# ATT: vcvttps2qqs -512(%rdx){1to2}, %xmm22 {%k7} {z} +# INTEL: vcvttps2qqs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to2} +0x62,0xe5,0x7d,0x9f,0x6d,0x72,0x80 + +# ATT: vcvttps2qqs 268435456(%rbp,%r14,8), %ymm22 +# INTEL: vcvttps2qqs ymm22, xmmword ptr [rbp + 8*r14 + 268435456] +0x62,0xa5,0x7d,0x28,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2qqs 291(%r8,%rax,4), %ymm22 {%k7} +# INTEL: vcvttps2qqs ymm22 {k7}, xmmword ptr [r8 + 4*rax + 291] +0x62,0xc5,0x7d,0x2f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2qqs (%rip){1to4}, %ymm22 +# INTEL: vcvttps2qqs ymm22, dword ptr [rip]{1to4} +0x62,0xe5,0x7d,0x38,0x6d,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttps2qqs -512(,%rbp,2), %ymm22 +# INTEL: vcvttps2qqs ymm22, xmmword ptr [2*rbp - 512] +0x62,0xe5,0x7d,0x28,0x6d,0x34,0x6d,0x00,0xfe,0xff,0xff + +# ATT: vcvttps2qqs 2032(%rcx), %ymm22 {%k7} {z} +# INTEL: vcvttps2qqs ymm22 {k7} {z}, xmmword ptr [rcx + 2032] +0x62,0xe5,0x7d,0xaf,0x6d,0x71,0x7f + +# ATT: vcvttps2qqs -512(%rdx){1to4}, %ymm22 {%k7} {z} +# INTEL: vcvttps2qqs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to4} +0x62,0xe5,0x7d,0xbf,0x6d,0x72,0x80 + +# ATT: vcvttps2qqs 268435456(%rbp,%r14,8), %zmm22 +# INTEL: vcvttps2qqs zmm22, ymmword ptr [rbp + 8*r14 + 268435456] +0x62,0xa5,0x7d,0x48,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2qqs 291(%r8,%rax,4), %zmm22 {%k7} +# INTEL: vcvttps2qqs zmm22 {k7}, ymmword ptr [r8 + 4*rax + 291] +0x62,0xc5,0x7d,0x4f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2qqs (%rip){1to8}, %zmm22 +# INTEL: vcvttps2qqs zmm22, dword ptr [rip]{1to8} +0x62,0xe5,0x7d,0x58,0x6d,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttps2qqs -1024(,%rbp,2), %zmm22 +# INTEL: vcvttps2qqs zmm22, ymmword ptr [2*rbp - 1024] +0x62,0xe5,0x7d,0x48,0x6d,0x34,0x6d,0x00,0xfc,0xff,0xff + +# ATT: vcvttps2qqs 4064(%rcx), %zmm22 {%k7} {z} +# INTEL: vcvttps2qqs zmm22 {k7} {z}, ymmword ptr 
[rcx + 4064] +0x62,0xe5,0x7d,0xcf,0x6d,0x71,0x7f + +# ATT: vcvttps2qqs -512(%rdx){1to8}, %zmm22 {%k7} {z} +# INTEL: vcvttps2qqs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to8} +0x62,0xe5,0x7d,0xdf,0x6d,0x72,0x80 + +# ATT: vcvttps2udqs %xmm23, %xmm22 +# INTEL: vcvttps2udqs xmm22, xmm23 +0x62,0xa5,0x7c,0x08,0x6c,0xf7 + +# ATT: vcvttps2udqs %xmm23, %xmm22 {%k7} +# INTEL: vcvttps2udqs xmm22 {k7}, xmm23 +0x62,0xa5,0x7c,0x0f,0x6c,0xf7 + +# ATT: vcvttps2udqs %xmm23, %xmm22 {%k7} {z} +# INTEL: vcvttps2udqs xmm22 {k7} {z}, xmm23 +0x62,0xa5,0x7c,0x8f,0x6c,0xf7 + +# ATT: vcvttps2udqs %ymm23, %ymm22 +# INTEL: vcvttps2udqs ymm22, ymm23 +0x62,0xa5,0x7c,0x28,0x6c,0xf7 + +# ATT: vcvttps2udqs {sae}, %ymm23, %ymm22 +# INTEL: vcvttps2udqs ymm22, ymm23, {sae} +0x62,0xa5,0x78,0x18,0x6c,0xf7 + +# ATT: vcvttps2udqs %ymm23, %ymm22 {%k7} +# INTEL: vcvttps2udqs ymm22 {k7}, ymm23 +0x62,0xa5,0x7c,0x2f,0x6c,0xf7 + +# ATT: vcvttps2udqs {sae}, %ymm23, %ymm22 {%k7} {z} +# INTEL: vcvttps2udqs ymm22 {k7} {z}, ymm23, {sae} +0x62,0xa5,0x78,0x9f,0x6c,0xf7 + +# ATT: vcvttps2udqs %zmm23, %zmm22 +# INTEL: vcvttps2udqs zmm22, zmm23 +0x62,0xa5,0x7c,0x48,0x6c,0xf7 + +# ATT: vcvttps2udqs {sae}, %zmm23, %zmm22 +# INTEL: vcvttps2udqs zmm22, zmm23, {sae} +0x62,0xa5,0x7c,0x18,0x6c,0xf7 + +# ATT: vcvttps2udqs %zmm23, %zmm22 {%k7} +# INTEL: vcvttps2udqs zmm22 {k7}, zmm23 +0x62,0xa5,0x7c,0x4f,0x6c,0xf7 + +# ATT: vcvttps2udqs {sae}, %zmm23, %zmm22 {%k7} {z} +# INTEL: vcvttps2udqs zmm22 {k7} {z}, zmm23, {sae} +0x62,0xa5,0x7c,0x9f,0x6c,0xf7 + +# ATT: vcvttps2udqs 268435456(%rbp,%r14,8), %xmm22 +# INTEL: vcvttps2udqs xmm22, xmmword ptr [rbp + 8*r14 + 268435456] +0x62,0xa5,0x7c,0x08,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2udqs 291(%r8,%rax,4), %xmm22 {%k7} +# INTEL: vcvttps2udqs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291] +0x62,0xc5,0x7c,0x0f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2udqs (%rip){1to4}, %xmm22 +# INTEL: vcvttps2udqs xmm22, dword ptr [rip]{1to4} +0x62,0xe5,0x7c,0x18,0x6c,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttps2udqs -512(,%rbp,2), %xmm22 +# INTEL: vcvttps2udqs xmm22, xmmword ptr [2*rbp - 512] +0x62,0xe5,0x7c,0x08,0x6c,0x34,0x6d,0x00,0xfe,0xff,0xff + +# ATT: vcvttps2udqs 2032(%rcx), %xmm22 {%k7} {z} +# INTEL: vcvttps2udqs xmm22 {k7} {z}, xmmword ptr [rcx + 2032] +0x62,0xe5,0x7c,0x8f,0x6c,0x71,0x7f + +# ATT: vcvttps2udqs -512(%rdx){1to4}, %xmm22 {%k7} {z} +# INTEL: vcvttps2udqs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to4} +0x62,0xe5,0x7c,0x9f,0x6c,0x72,0x80 + +# ATT: vcvttps2udqs 268435456(%rbp,%r14,8), %ymm22 +# INTEL: vcvttps2udqs ymm22, ymmword ptr [rbp + 8*r14 + 268435456] +0x62,0xa5,0x7c,0x28,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2udqs 291(%r8,%rax,4), %ymm22 {%k7} +# INTEL: vcvttps2udqs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291] +0x62,0xc5,0x7c,0x2f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2udqs (%rip){1to8}, %ymm22 +# INTEL: vcvttps2udqs ymm22, dword ptr [rip]{1to8} +0x62,0xe5,0x7c,0x38,0x6c,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttps2udqs -1024(,%rbp,2), %ymm22 +# INTEL: vcvttps2udqs ymm22, ymmword ptr [2*rbp - 1024] +0x62,0xe5,0x7c,0x28,0x6c,0x34,0x6d,0x00,0xfc,0xff,0xff + +# ATT: vcvttps2udqs 4064(%rcx), %ymm22 {%k7} {z} +# INTEL: vcvttps2udqs ymm22 {k7} {z}, ymmword ptr [rcx + 4064] +0x62,0xe5,0x7c,0xaf,0x6c,0x71,0x7f + +# ATT: vcvttps2udqs -512(%rdx){1to8}, %ymm22 {%k7} {z} +# INTEL: vcvttps2udqs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to8} +0x62,0xe5,0x7c,0xbf,0x6c,0x72,0x80 + +# ATT: vcvttps2udqs 268435456(%rbp,%r14,8), %zmm22 +# INTEL: vcvttps2udqs zmm22, zmmword ptr [rbp + 
8*r14 + 268435456] +0x62,0xa5,0x7c,0x48,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2udqs 291(%r8,%rax,4), %zmm22 {%k7} +# INTEL: vcvttps2udqs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291] +0x62,0xc5,0x7c,0x4f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2udqs (%rip){1to16}, %zmm22 +# INTEL: vcvttps2udqs zmm22, dword ptr [rip]{1to16} +0x62,0xe5,0x7c,0x58,0x6c,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttps2udqs -2048(,%rbp,2), %zmm22 +# INTEL: vcvttps2udqs zmm22, zmmword ptr [2*rbp - 2048] +0x62,0xe5,0x7c,0x48,0x6c,0x34,0x6d,0x00,0xf8,0xff,0xff + +# ATT: vcvttps2udqs 8128(%rcx), %zmm22 {%k7} {z} +# INTEL: vcvttps2udqs zmm22 {k7} {z}, zmmword ptr [rcx + 8128] +0x62,0xe5,0x7c,0xcf,0x6c,0x71,0x7f + +# ATT: vcvttps2udqs -512(%rdx){1to16}, %zmm22 {%k7} {z} +# INTEL: vcvttps2udqs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to16} +0x62,0xe5,0x7c,0xdf,0x6c,0x72,0x80 + +# ATT: vcvttps2uqqs %xmm23, %xmm22 +# INTEL: vcvttps2uqqs xmm22, xmm23 +0x62,0xa5,0x7d,0x08,0x6c,0xf7 + +# ATT: vcvttps2uqqs %xmm23, %xmm22 {%k7} +# INTEL: vcvttps2uqqs xmm22 {k7}, xmm23 +0x62,0xa5,0x7d,0x0f,0x6c,0xf7 + +# ATT: vcvttps2uqqs %xmm23, %xmm22 {%k7} {z} +# INTEL: vcvttps2uqqs xmm22 {k7} {z}, xmm23 +0x62,0xa5,0x7d,0x8f,0x6c,0xf7 + +# ATT: vcvttps2uqqs %xmm23, %ymm22 +# INTEL: vcvttps2uqqs ymm22, xmm23 +0x62,0xa5,0x7d,0x28,0x6c,0xf7 + +# ATT: vcvttps2uqqs {sae}, %xmm23, %ymm22 +# INTEL: vcvttps2uqqs ymm22, xmm23, {sae} +0x62,0xa5,0x79,0x18,0x6c,0xf7 + +# ATT: vcvttps2uqqs %xmm23, %ymm22 {%k7} +# INTEL: vcvttps2uqqs ymm22 {k7}, xmm23 +0x62,0xa5,0x7d,0x2f,0x6c,0xf7 + +# ATT: vcvttps2uqqs {sae}, %xmm23, %ymm22 {%k7} {z} +# INTEL: vcvttps2uqqs ymm22 {k7} {z}, xmm23, {sae} +0x62,0xa5,0x79,0x9f,0x6c,0xf7 + +# ATT: vcvttps2uqqs %ymm23, %zmm22 +# INTEL: vcvttps2uqqs zmm22, ymm23 +0x62,0xa5,0x7d,0x48,0x6c,0xf7 + +# ATT: vcvttps2uqqs {sae}, %ymm23, %zmm22 +# INTEL: vcvttps2uqqs zmm22, ymm23, {sae} +0x62,0xa5,0x7d,0x18,0x6c,0xf7 + +# ATT: vcvttps2uqqs %ymm23, %zmm22 {%k7} +# INTEL: vcvttps2uqqs zmm22 {k7}, ymm23 +0x62,0xa5,0x7d,0x4f,0x6c,0xf7 + +# ATT: vcvttps2uqqs {sae}, %ymm23, %zmm22 {%k7} {z} +# INTEL: vcvttps2uqqs zmm22 {k7} {z}, ymm23, {sae} +0x62,0xa5,0x7d,0x9f,0x6c,0xf7 + +# ATT: vcvttps2uqqs 268435456(%rbp,%r14,8), %xmm22 +# INTEL: vcvttps2uqqs xmm22, qword ptr [rbp + 8*r14 + 268435456] +0x62,0xa5,0x7d,0x08,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2uqqs 291(%r8,%rax,4), %xmm22 {%k7} +# INTEL: vcvttps2uqqs xmm22 {k7}, qword ptr [r8 + 4*rax + 291] +0x62,0xc5,0x7d,0x0f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2uqqs (%rip){1to2}, %xmm22 +# INTEL: vcvttps2uqqs xmm22, dword ptr [rip]{1to2} +0x62,0xe5,0x7d,0x18,0x6c,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttps2uqqs -256(,%rbp,2), %xmm22 +# INTEL: vcvttps2uqqs xmm22, qword ptr [2*rbp - 256] +0x62,0xe5,0x7d,0x08,0x6c,0x34,0x6d,0x00,0xff,0xff,0xff + +# ATT: vcvttps2uqqs 1016(%rcx), %xmm22 {%k7} {z} +# INTEL: vcvttps2uqqs xmm22 {k7} {z}, qword ptr [rcx + 1016] +0x62,0xe5,0x7d,0x8f,0x6c,0x71,0x7f + +# ATT: vcvttps2uqqs -512(%rdx){1to2}, %xmm22 {%k7} {z} +# INTEL: vcvttps2uqqs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to2} +0x62,0xe5,0x7d,0x9f,0x6c,0x72,0x80 + +# ATT: vcvttps2uqqs 268435456(%rbp,%r14,8), %ymm22 +# INTEL: vcvttps2uqqs ymm22, xmmword ptr [rbp + 8*r14 + 268435456] +0x62,0xa5,0x7d,0x28,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2uqqs 291(%r8,%rax,4), %ymm22 {%k7} +# INTEL: vcvttps2uqqs ymm22 {k7}, xmmword ptr [r8 + 4*rax + 291] +0x62,0xc5,0x7d,0x2f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2uqqs (%rip){1to4}, %ymm22 +# INTEL: 
vcvttps2uqqs ymm22, dword ptr [rip]{1to4} +0x62,0xe5,0x7d,0x38,0x6c,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttps2uqqs -512(,%rbp,2), %ymm22 +# INTEL: vcvttps2uqqs ymm22, xmmword ptr [2*rbp - 512] +0x62,0xe5,0x7d,0x28,0x6c,0x34,0x6d,0x00,0xfe,0xff,0xff + +# ATT: vcvttps2uqqs 2032(%rcx), %ymm22 {%k7} {z} +# INTEL: vcvttps2uqqs ymm22 {k7} {z}, xmmword ptr [rcx + 2032] +0x62,0xe5,0x7d,0xaf,0x6c,0x71,0x7f + +# ATT: vcvttps2uqqs -512(%rdx){1to4}, %ymm22 {%k7} {z} +# INTEL: vcvttps2uqqs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to4} +0x62,0xe5,0x7d,0xbf,0x6c,0x72,0x80 + +# ATT: vcvttps2uqqs 268435456(%rbp,%r14,8), %zmm22 +# INTEL: vcvttps2uqqs zmm22, ymmword ptr [rbp + 8*r14 + 268435456] +0x62,0xa5,0x7d,0x48,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttps2uqqs 291(%r8,%rax,4), %zmm22 {%k7} +# INTEL: vcvttps2uqqs zmm22 {k7}, ymmword ptr [r8 + 4*rax + 291] +0x62,0xc5,0x7d,0x4f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttps2uqqs (%rip){1to8}, %zmm22 +# INTEL: vcvttps2uqqs zmm22, dword ptr [rip]{1to8} +0x62,0xe5,0x7d,0x58,0x6c,0x35,0x00,0x00,0x00,0x00 + +# ATT: vcvttps2uqqs -1024(,%rbp,2), %zmm22 +# INTEL: vcvttps2uqqs zmm22, ymmword ptr [2*rbp - 1024] +0x62,0xe5,0x7d,0x48,0x6c,0x34,0x6d,0x00,0xfc,0xff,0xff + +# ATT: vcvttps2uqqs 4064(%rcx), %zmm22 {%k7} {z} +# INTEL: vcvttps2uqqs zmm22 {k7} {z}, ymmword ptr [rcx + 4064] +0x62,0xe5,0x7d,0xcf,0x6c,0x71,0x7f + +# ATT: vcvttps2uqqs -512(%rdx){1to8}, %zmm22 {%k7} {z} +# INTEL: vcvttps2uqqs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to8} +0x62,0xe5,0x7d,0xdf,0x6c,0x72,0x80 + +# ATT: vcvttsd2sis %xmm22, %ecx +# INTEL: vcvttsd2sis ecx, xmm22 +0x62,0xb5,0x7f,0x08,0x6d,0xce + +# ATT: vcvttsd2sis {sae}, %xmm22, %ecx +# INTEL: vcvttsd2sis ecx, xmm22, {sae} +0x62,0xb5,0x7f,0x18,0x6d,0xce + +# ATT: vcvttsd2sis %xmm22, %r9 +# INTEL: vcvttsd2sis r9, xmm22 +0x62,0x35,0xff,0x08,0x6d,0xce + +# ATT: vcvttsd2sis {sae}, %xmm22, %r9 +# INTEL: vcvttsd2sis r9, xmm22, {sae} +0x62,0x35,0xff,0x18,0x6d,0xce + +# ATT: vcvttsd2sis 268435456(%rbp,%r14,8), %ecx +# INTEL: vcvttsd2sis ecx, qword ptr [rbp + 8*r14 + 268435456] +0x62,0xb5,0x7f,0x08,0x6d,0x8c,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttsd2sis 291(%r8,%rax,4), %ecx +# INTEL: vcvttsd2sis ecx, qword ptr [r8 + 4*rax + 291] +0x62,0xd5,0x7f,0x08,0x6d,0x8c,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttsd2sis (%rip), %ecx +# INTEL: vcvttsd2sis ecx, qword ptr [rip] +0x62,0xf5,0x7f,0x08,0x6d,0x0d,0x00,0x00,0x00,0x00 + +# ATT: vcvttsd2sis -256(,%rbp,2), %ecx +# INTEL: vcvttsd2sis ecx, qword ptr [2*rbp - 256] +0x62,0xf5,0x7f,0x08,0x6d,0x0c,0x6d,0x00,0xff,0xff,0xff + +# ATT: vcvttsd2sis 1016(%rcx), %ecx +# INTEL: vcvttsd2sis ecx, qword ptr [rcx + 1016] +0x62,0xf5,0x7f,0x08,0x6d,0x49,0x7f + +# ATT: vcvttsd2sis -1024(%rdx), %ecx +# INTEL: vcvttsd2sis ecx, qword ptr [rdx - 1024] +0x62,0xf5,0x7f,0x08,0x6d,0x4a,0x80 + +# ATT: vcvttsd2sis 268435456(%rbp,%r14,8), %r9 +# INTEL: vcvttsd2sis r9, qword ptr [rbp + 8*r14 + 268435456] +0x62,0x35,0xff,0x08,0x6d,0x8c,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttsd2sis 291(%r8,%rax,4), %r9 +# INTEL: vcvttsd2sis r9, qword ptr [r8 + 4*rax + 291] +0x62,0x55,0xff,0x08,0x6d,0x8c,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttsd2sis (%rip), %r9 +# INTEL: vcvttsd2sis r9, qword ptr [rip] +0x62,0x75,0xff,0x08,0x6d,0x0d,0x00,0x00,0x00,0x00 + +# ATT: vcvttsd2sis -256(,%rbp,2), %r9 +# INTEL: vcvttsd2sis r9, qword ptr [2*rbp - 256] +0x62,0x75,0xff,0x08,0x6d,0x0c,0x6d,0x00,0xff,0xff,0xff + +# ATT: vcvttsd2sis 1016(%rcx), %r9 +# INTEL: vcvttsd2sis r9, qword ptr [rcx + 1016] +0x62,0x75,0xff,0x08,0x6d,0x49,0x7f + +# ATT: vcvttsd2sis 
-1024(%rdx), %r9 +# INTEL: vcvttsd2sis r9, qword ptr [rdx - 1024] +0x62,0x75,0xff,0x08,0x6d,0x4a,0x80 + +# ATT: vcvttsd2usis %xmm22, %ecx +# INTEL: vcvttsd2usis ecx, xmm22 +0x62,0xb5,0x7f,0x08,0x6c,0xce + +# ATT: vcvttsd2usis {sae}, %xmm22, %ecx +# INTEL: vcvttsd2usis ecx, xmm22, {sae} +0x62,0xb5,0x7f,0x18,0x6c,0xce + +# ATT: vcvttsd2usis %xmm22, %r9 +# INTEL: vcvttsd2usis r9, xmm22 +0x62,0x35,0xff,0x08,0x6c,0xce + +# ATT: vcvttsd2usis {sae}, %xmm22, %r9 +# INTEL: vcvttsd2usis r9, xmm22, {sae} +0x62,0x35,0xff,0x18,0x6c,0xce + +# ATT: vcvttsd2usis 268435456(%rbp,%r14,8), %ecx +# INTEL: vcvttsd2usis ecx, qword ptr [rbp + 8*r14 + 268435456] +0x62,0xb5,0x7f,0x08,0x6c,0x8c,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttsd2usis 291(%r8,%rax,4), %ecx +# INTEL: vcvttsd2usis ecx, qword ptr [r8 + 4*rax + 291] +0x62,0xd5,0x7f,0x08,0x6c,0x8c,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttsd2usis (%rip), %ecx +# INTEL: vcvttsd2usis ecx, qword ptr [rip] +0x62,0xf5,0x7f,0x08,0x6c,0x0d,0x00,0x00,0x00,0x00 + +# ATT: vcvttsd2usis -256(,%rbp,2), %ecx +# INTEL: vcvttsd2usis ecx, qword ptr [2*rbp - 256] +0x62,0xf5,0x7f,0x08,0x6c,0x0c,0x6d,0x00,0xff,0xff,0xff + +# ATT: vcvttsd2usis 1016(%rcx), %ecx +# INTEL: vcvttsd2usis ecx, qword ptr [rcx + 1016] +0x62,0xf5,0x7f,0x08,0x6c,0x49,0x7f + +# ATT: vcvttsd2usis -1024(%rdx), %ecx +# INTEL: vcvttsd2usis ecx, qword ptr [rdx - 1024] +0x62,0xf5,0x7f,0x08,0x6c,0x4a,0x80 + +# ATT: vcvttsd2usis 268435456(%rbp,%r14,8), %r9 +# INTEL: vcvttsd2usis r9, qword ptr [rbp + 8*r14 + 268435456] +0x62,0x35,0xff,0x08,0x6c,0x8c,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttsd2usis 291(%r8,%rax,4), %r9 +# INTEL: vcvttsd2usis r9, qword ptr [r8 + 4*rax + 291] +0x62,0x55,0xff,0x08,0x6c,0x8c,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttsd2usis (%rip), %r9 +# INTEL: vcvttsd2usis r9, qword ptr [rip] +0x62,0x75,0xff,0x08,0x6c,0x0d,0x00,0x00,0x00,0x00 + +# ATT: vcvttsd2usis -256(,%rbp,2), %r9 +# INTEL: vcvttsd2usis r9, qword ptr [2*rbp - 256] +0x62,0x75,0xff,0x08,0x6c,0x0c,0x6d,0x00,0xff,0xff,0xff + +# ATT: vcvttsd2usis 1016(%rcx), %r9 +# INTEL: vcvttsd2usis r9, qword ptr [rcx + 1016] +0x62,0x75,0xff,0x08,0x6c,0x49,0x7f + +# ATT: vcvttsd2usis -1024(%rdx), %r9 +# INTEL: vcvttsd2usis r9, qword ptr [rdx - 1024] +0x62,0x75,0xff,0x08,0x6c,0x4a,0x80 + +# ATT: vcvttss2sis %xmm22, %ecx +# INTEL: vcvttss2sis ecx, xmm22 +0x62,0xb5,0x7e,0x08,0x6d,0xce + +# ATT: vcvttss2sis {sae}, %xmm22, %ecx +# INTEL: vcvttss2sis ecx, xmm22, {sae} +0x62,0xb5,0x7e,0x18,0x6d,0xce + +# ATT: vcvttss2sis %xmm22, %r9 +# INTEL: vcvttss2sis r9, xmm22 +0x62,0x35,0xfe,0x08,0x6d,0xce + +# ATT: vcvttss2sis {sae}, %xmm22, %r9 +# INTEL: vcvttss2sis r9, xmm22, {sae} +0x62,0x35,0xfe,0x18,0x6d,0xce + +# ATT: vcvttss2sis 268435456(%rbp,%r14,8), %ecx +# INTEL: vcvttss2sis ecx, dword ptr [rbp + 8*r14 + 268435456] +0x62,0xb5,0x7e,0x08,0x6d,0x8c,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttss2sis 291(%r8,%rax,4), %ecx +# INTEL: vcvttss2sis ecx, dword ptr [r8 + 4*rax + 291] +0x62,0xd5,0x7e,0x08,0x6d,0x8c,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttss2sis (%rip), %ecx +# INTEL: vcvttss2sis ecx, dword ptr [rip] +0x62,0xf5,0x7e,0x08,0x6d,0x0d,0x00,0x00,0x00,0x00 + +# ATT: vcvttss2sis -128(,%rbp,2), %ecx +# INTEL: vcvttss2sis ecx, dword ptr [2*rbp - 128] +0x62,0xf5,0x7e,0x08,0x6d,0x0c,0x6d,0x80,0xff,0xff,0xff + +# ATT: vcvttss2sis 508(%rcx), %ecx +# INTEL: vcvttss2sis ecx, dword ptr [rcx + 508] +0x62,0xf5,0x7e,0x08,0x6d,0x49,0x7f + +# ATT: vcvttss2sis -512(%rdx), %ecx +# INTEL: vcvttss2sis ecx, dword ptr [rdx - 512] +0x62,0xf5,0x7e,0x08,0x6d,0x4a,0x80 + +# ATT: vcvttss2sis 
268435456(%rbp,%r14,8), %r9 +# INTEL: vcvttss2sis r9, dword ptr [rbp + 8*r14 + 268435456] +0x62,0x35,0xfe,0x08,0x6d,0x8c,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttss2sis 291(%r8,%rax,4), %r9 +# INTEL: vcvttss2sis r9, dword ptr [r8 + 4*rax + 291] +0x62,0x55,0xfe,0x08,0x6d,0x8c,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttss2sis (%rip), %r9 +# INTEL: vcvttss2sis r9, dword ptr [rip] +0x62,0x75,0xfe,0x08,0x6d,0x0d,0x00,0x00,0x00,0x00 + +# ATT: vcvttss2sis -128(,%rbp,2), %r9 +# INTEL: vcvttss2sis r9, dword ptr [2*rbp - 128] +0x62,0x75,0xfe,0x08,0x6d,0x0c,0x6d,0x80,0xff,0xff,0xff + +# ATT: vcvttss2sis 508(%rcx), %r9 +# INTEL: vcvttss2sis r9, dword ptr [rcx + 508] +0x62,0x75,0xfe,0x08,0x6d,0x49,0x7f + +# ATT: vcvttss2sis -512(%rdx), %r9 +# INTEL: vcvttss2sis r9, dword ptr [rdx - 512] +0x62,0x75,0xfe,0x08,0x6d,0x4a,0x80 + +# ATT: vcvttss2usis %xmm22, %ecx +# INTEL: vcvttss2usis ecx, xmm22 +0x62,0xb5,0x7e,0x08,0x6c,0xce + +# ATT: vcvttss2usis {sae}, %xmm22, %ecx +# INTEL: vcvttss2usis ecx, xmm22, {sae} +0x62,0xb5,0x7e,0x18,0x6c,0xce + +# ATT: vcvttss2usis %xmm22, %r9 +# INTEL: vcvttss2usis r9, xmm22 +0x62,0x35,0xfe,0x08,0x6c,0xce + +# ATT: vcvttss2usis {sae}, %xmm22, %r9 +# INTEL: vcvttss2usis r9, xmm22, {sae} +0x62,0x35,0xfe,0x18,0x6c,0xce + +# ATT: vcvttss2usis 268435456(%rbp,%r14,8), %ecx +# INTEL: vcvttss2usis ecx, dword ptr [rbp + 8*r14 + 268435456] +0x62,0xb5,0x7e,0x08,0x6c,0x8c,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttss2usis 291(%r8,%rax,4), %ecx +# INTEL: vcvttss2usis ecx, dword ptr [r8 + 4*rax + 291] +0x62,0xd5,0x7e,0x08,0x6c,0x8c,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttss2usis (%rip), %ecx +# INTEL: vcvttss2usis ecx, dword ptr [rip] +0x62,0xf5,0x7e,0x08,0x6c,0x0d,0x00,0x00,0x00,0x00 + +# ATT: vcvttss2usis -128(,%rbp,2), %ecx +# INTEL: vcvttss2usis ecx, dword ptr [2*rbp - 128] +0x62,0xf5,0x7e,0x08,0x6c,0x0c,0x6d,0x80,0xff,0xff,0xff + +# ATT: vcvttss2usis 508(%rcx), %ecx +# INTEL: vcvttss2usis ecx, dword ptr [rcx + 508] +0x62,0xf5,0x7e,0x08,0x6c,0x49,0x7f + +# ATT: vcvttss2usis -512(%rdx), %ecx +# INTEL: vcvttss2usis ecx, dword ptr [rdx - 512] +0x62,0xf5,0x7e,0x08,0x6c,0x4a,0x80 + +# ATT: vcvttss2usis 268435456(%rbp,%r14,8), %r9 +# INTEL: vcvttss2usis r9, dword ptr [rbp + 8*r14 + 268435456] +0x62,0x35,0xfe,0x08,0x6c,0x8c,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vcvttss2usis 291(%r8,%rax,4), %r9 +# INTEL: vcvttss2usis r9, dword ptr [r8 + 4*rax + 291] +0x62,0x55,0xfe,0x08,0x6c,0x8c,0x80,0x23,0x01,0x00,0x00 + +# ATT: vcvttss2usis (%rip), %r9 +# INTEL: vcvttss2usis r9, dword ptr [rip] +0x62,0x75,0xfe,0x08,0x6c,0x0d,0x00,0x00,0x00,0x00 + +# ATT: vcvttss2usis -128(,%rbp,2), %r9 +# INTEL: vcvttss2usis r9, dword ptr [2*rbp - 128] +0x62,0x75,0xfe,0x08,0x6c,0x0c,0x6d,0x80,0xff,0xff,0xff + +# ATT: vcvttss2usis 508(%rcx), %r9 +# INTEL: vcvttss2usis r9, dword ptr [rcx + 508] +0x62,0x75,0xfe,0x08,0x6c,0x49,0x7f + +# ATT: vcvttss2usis -512(%rdx), %r9 +# INTEL: vcvttss2usis r9, dword ptr [rdx - 512] +0x62,0x75,0xfe,0x08,0x6c,0x4a,0x80 + diff --git a/llvm/test/MC/X86/avx10_2satcvtds-32-att.s b/llvm/test/MC/X86/avx10_2satcvtds-32-att.s new file mode 100644 index 0000000..ec59839 --- /dev/null +++ b/llvm/test/MC/X86/avx10_2satcvtds-32-att.s @@ -0,0 +1,1042 @@ +// RUN: llvm-mc -triple i386 --show-encoding %s | FileCheck %s + +// CHECK: vcvttsd2sis %xmm2, %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0xca] + vcvttsd2sis %xmm2, %ecx + +// CHECK: vcvttsd2sis {sae}, %xmm2, %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x18,0x6d,0xca] + vcvttsd2sis {sae}, %xmm2, %ecx + +// CHECK: vcvttsd2sis 268435456(%esp,%esi,8), %ecx +// 
CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0x8c,0xf4,0x00,0x00,0x00,0x10] + vcvttsd2sis 268435456(%esp,%esi,8), %ecx + +// CHECK: vcvttsd2sis 291(%edi,%eax,4), %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0x8c,0x87,0x23,0x01,0x00,0x00] + vcvttsd2sis 291(%edi,%eax,4), %ecx + +// CHECK: vcvttsd2sis (%eax), %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0x08] + vcvttsd2sis (%eax), %ecx + +// CHECK: vcvttsd2sis -256(,%ebp,2), %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0x0c,0x6d,0x00,0xff,0xff,0xff] + vcvttsd2sis -256(,%ebp,2), %ecx + +// CHECK: vcvttsd2sis 1016(%ecx), %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0x49,0x7f] + vcvttsd2sis 1016(%ecx), %ecx + +// CHECK: vcvttsd2sis -1024(%edx), %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0x4a,0x80] + vcvttsd2sis -1024(%edx), %ecx + +// CHECK: vcvttsd2usis %xmm2, %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0xca] + vcvttsd2usis %xmm2, %ecx + +// CHECK: vcvttsd2usis {sae}, %xmm2, %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x18,0x6c,0xca] + vcvttsd2usis {sae}, %xmm2, %ecx + +// CHECK: vcvttsd2usis 268435456(%esp,%esi,8), %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0x8c,0xf4,0x00,0x00,0x00,0x10] + vcvttsd2usis 268435456(%esp,%esi,8), %ecx + +// CHECK: vcvttsd2usis 291(%edi,%eax,4), %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0x8c,0x87,0x23,0x01,0x00,0x00] + vcvttsd2usis 291(%edi,%eax,4), %ecx + +// CHECK: vcvttsd2usis (%eax), %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0x08] + vcvttsd2usis (%eax), %ecx + +// CHECK: vcvttsd2usis -256(,%ebp,2), %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0x0c,0x6d,0x00,0xff,0xff,0xff] + vcvttsd2usis -256(,%ebp,2), %ecx + +// CHECK: vcvttsd2usis 1016(%ecx), %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0x49,0x7f] + vcvttsd2usis 1016(%ecx), %ecx + +// CHECK: vcvttsd2usis -1024(%edx), %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0x4a,0x80] + vcvttsd2usis -1024(%edx), %ecx + +// CHECK: vcvttss2sis %xmm2, %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0xca] + vcvttss2sis %xmm2, %ecx + +// CHECK: vcvttss2sis {sae}, %xmm2, %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x18,0x6d,0xca] + vcvttss2sis {sae}, %xmm2, %ecx + +// CHECK: vcvttss2sis 268435456(%esp,%esi,8), %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x8c,0xf4,0x00,0x00,0x00,0x10] + vcvttss2sis 268435456(%esp,%esi,8), %ecx + +// CHECK: vcvttss2sis 291(%edi,%eax,4), %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x8c,0x87,0x23,0x01,0x00,0x00] + vcvttss2sis 291(%edi,%eax,4), %ecx + +// CHECK: vcvttss2sis (%eax), %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x08] + vcvttss2sis (%eax), %ecx + +// CHECK: vcvttss2sis -128(,%ebp,2), %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x0c,0x6d,0x80,0xff,0xff,0xff] + vcvttss2sis -128(,%ebp,2), %ecx + +// CHECK: vcvttss2sis 508(%ecx), %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x49,0x7f] + vcvttss2sis 508(%ecx), %ecx + +// CHECK: vcvttss2sis -512(%edx), %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x4a,0x80] + vcvttss2sis -512(%edx), %ecx + +// CHECK: vcvttss2usis %xmm2, %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0xca] + vcvttss2usis %xmm2, %ecx + +// CHECK: vcvttss2usis {sae}, %xmm2, %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x18,0x6c,0xca] + vcvttss2usis {sae}, %xmm2, %ecx + +// CHECK: vcvttss2usis 268435456(%esp,%esi,8), %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0x8c,0xf4,0x00,0x00,0x00,0x10] + vcvttss2usis 268435456(%esp,%esi,8), %ecx + +// CHECK: vcvttss2usis 291(%edi,%eax,4), %ecx 
+// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0x8c,0x87,0x23,0x01,0x00,0x00] + vcvttss2usis 291(%edi,%eax,4), %ecx + +// CHECK: vcvttss2usis (%eax), %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0x08] + vcvttss2usis (%eax), %ecx + +// CHECK: vcvttss2usis -128(,%ebp,2), %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0x0c,0x6d,0x80,0xff,0xff,0xff] + vcvttss2usis -128(,%ebp,2), %ecx + +// CHECK: vcvttss2usis 508(%ecx), %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0x49,0x7f] + vcvttss2usis 508(%ecx), %ecx + +// CHECK: vcvttss2usis -512(%edx), %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0x4a,0x80] + vcvttss2usis -512(%edx), %ecx + +// CHECK: vcvttpd2dqs %xmm3, %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x08,0x6d,0xd3] + vcvttpd2dqs %xmm3, %xmm2 + +// CHECK: vcvttpd2dqs %xmm3, %xmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfc,0x0f,0x6d,0xd3] + vcvttpd2dqs %xmm3, %xmm2 {%k7} + +// CHECK: vcvttpd2dqs %xmm3, %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfc,0x8f,0x6d,0xd3] + vcvttpd2dqs %xmm3, %xmm2 {%k7} {z} + +// CHECK: vcvttpd2dqs %ymm3, %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x28,0x6d,0xd3] + vcvttpd2dqs %ymm3, %xmm2 + +// CHECK: vcvttpd2dqs {sae}, %ymm3, %xmm2 +// CHECK: encoding: [0x62,0xf5,0xf8,0x18,0x6d,0xd3] + vcvttpd2dqs {sae}, %ymm3, %xmm2 + +// CHECK: vcvttpd2dqs %ymm3, %xmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfc,0x2f,0x6d,0xd3] + vcvttpd2dqs %ymm3, %xmm2 {%k7} + +// CHECK: vcvttpd2dqs {sae}, %ymm3, %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xf8,0x9f,0x6d,0xd3] + vcvttpd2dqs {sae}, %ymm3, %xmm2 {%k7} {z} + +// CHECK: vcvttpd2dqs %zmm3, %ymm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x48,0x6d,0xd3] + vcvttpd2dqs %zmm3, %ymm2 + +// CHECK: vcvttpd2dqs {sae}, %zmm3, %ymm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x18,0x6d,0xd3] + vcvttpd2dqs {sae}, %zmm3, %ymm2 + +// CHECK: vcvttpd2dqs %zmm3, %ymm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfc,0x4f,0x6d,0xd3] + vcvttpd2dqs %zmm3, %ymm2 {%k7} + +// CHECK: vcvttpd2dqs {sae}, %zmm3, %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfc,0x9f,0x6d,0xd3] + vcvttpd2dqs {sae}, %zmm3, %ymm2 {%k7} {z} + +// CHECK: vcvttpd2dqsx 268435456(%esp,%esi,8), %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x08,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttpd2dqsx 268435456(%esp,%esi,8), %xmm2 + +// CHECK: vcvttpd2dqsx 291(%edi,%eax,4), %xmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfc,0x0f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttpd2dqsx 291(%edi,%eax,4), %xmm2 {%k7} + +// CHECK: vcvttpd2dqs (%eax){1to2}, %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x18,0x6d,0x10] + vcvttpd2dqs (%eax){1to2}, %xmm2 + +// CHECK: vcvttpd2dqsx -512(,%ebp,2), %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x08,0x6d,0x14,0x6d,0x00,0xfe,0xff,0xff] + vcvttpd2dqsx -512(,%ebp,2), %xmm2 + +// CHECK: vcvttpd2dqsx 2032(%ecx), %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfc,0x8f,0x6d,0x51,0x7f] + vcvttpd2dqsx 2032(%ecx), %xmm2 {%k7} {z} + +// CHECK: vcvttpd2dqs -1024(%edx){1to2}, %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfc,0x9f,0x6d,0x52,0x80] + vcvttpd2dqs -1024(%edx){1to2}, %xmm2 {%k7} {z} + +// CHECK: vcvttpd2dqs (%eax){1to4}, %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x38,0x6d,0x10] + vcvttpd2dqs (%eax){1to4}, %xmm2 + +// CHECK: vcvttpd2dqsy -1024(,%ebp,2), %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x28,0x6d,0x14,0x6d,0x00,0xfc,0xff,0xff] + vcvttpd2dqsy -1024(,%ebp,2), %xmm2 + +// CHECK: vcvttpd2dqsy 4064(%ecx), %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfc,0xaf,0x6d,0x51,0x7f] + vcvttpd2dqsy 4064(%ecx), %xmm2 {%k7} {z} + +// CHECK: 
vcvttpd2dqs -1024(%edx){1to4}, %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfc,0xbf,0x6d,0x52,0x80] + vcvttpd2dqs -1024(%edx){1to4}, %xmm2 {%k7} {z} + +// CHECK: vcvttpd2dqs 268435456(%esp,%esi,8), %ymm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x48,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttpd2dqs 268435456(%esp,%esi,8), %ymm2 + +// CHECK: vcvttpd2dqs 291(%edi,%eax,4), %ymm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfc,0x4f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttpd2dqs 291(%edi,%eax,4), %ymm2 {%k7} + +// CHECK: vcvttpd2dqs (%eax){1to8}, %ymm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x58,0x6d,0x10] + vcvttpd2dqs (%eax){1to8}, %ymm2 + +// CHECK: vcvttpd2dqs -2048(,%ebp,2), %ymm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x48,0x6d,0x14,0x6d,0x00,0xf8,0xff,0xff] + vcvttpd2dqs -2048(,%ebp,2), %ymm2 + +// CHECK: vcvttpd2dqs 8128(%ecx), %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfc,0xcf,0x6d,0x51,0x7f] + vcvttpd2dqs 8128(%ecx), %ymm2 {%k7} {z} + +// CHECK: vcvttpd2dqs -1024(%edx){1to8}, %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfc,0xdf,0x6d,0x52,0x80] + vcvttpd2dqs -1024(%edx){1to8}, %ymm2 {%k7} {z} + +// CHECK: vcvttpd2qqs %xmm3, %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x08,0x6d,0xd3] + vcvttpd2qqs %xmm3, %xmm2 + +// CHECK: vcvttpd2qqs %xmm3, %xmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfd,0x0f,0x6d,0xd3] + vcvttpd2qqs %xmm3, %xmm2 {%k7} + +// CHECK: vcvttpd2qqs %xmm3, %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfd,0x8f,0x6d,0xd3] + vcvttpd2qqs %xmm3, %xmm2 {%k7} {z} + +// CHECK: vcvttpd2qqs %ymm3, %ymm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x28,0x6d,0xd3] + vcvttpd2qqs %ymm3, %ymm2 + +// CHECK: vcvttpd2qqs {sae}, %ymm3, %ymm2 +// CHECK: encoding: [0x62,0xf5,0xf9,0x18,0x6d,0xd3] + vcvttpd2qqs {sae}, %ymm3, %ymm2 + +// CHECK: vcvttpd2qqs %ymm3, %ymm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfd,0x2f,0x6d,0xd3] + vcvttpd2qqs %ymm3, %ymm2 {%k7} + +// CHECK: vcvttpd2qqs {sae}, %ymm3, %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xf9,0x9f,0x6d,0xd3] + vcvttpd2qqs {sae}, %ymm3, %ymm2 {%k7} {z} + +// CHECK: vcvttpd2qqs %zmm3, %zmm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x48,0x6d,0xd3] + vcvttpd2qqs %zmm3, %zmm2 + +// CHECK: vcvttpd2qqs {sae}, %zmm3, %zmm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x18,0x6d,0xd3] + vcvttpd2qqs {sae}, %zmm3, %zmm2 + +// CHECK: vcvttpd2qqs %zmm3, %zmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfd,0x4f,0x6d,0xd3] + vcvttpd2qqs %zmm3, %zmm2 {%k7} + +// CHECK: vcvttpd2qqs {sae}, %zmm3, %zmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfd,0x9f,0x6d,0xd3] + vcvttpd2qqs {sae}, %zmm3, %zmm2 {%k7} {z} + +// CHECK: vcvttpd2qqs 268435456(%esp,%esi,8), %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x08,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttpd2qqs 268435456(%esp,%esi,8), %xmm2 + +// CHECK: vcvttpd2qqs 291(%edi,%eax,4), %xmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfd,0x0f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttpd2qqs 291(%edi,%eax,4), %xmm2 {%k7} + +// CHECK: vcvttpd2qqs (%eax){1to2}, %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x18,0x6d,0x10] + vcvttpd2qqs (%eax){1to2}, %xmm2 + +// CHECK: vcvttpd2qqs -512(,%ebp,2), %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x08,0x6d,0x14,0x6d,0x00,0xfe,0xff,0xff] + vcvttpd2qqs -512(,%ebp,2), %xmm2 + +// CHECK: vcvttpd2qqs 2032(%ecx), %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfd,0x8f,0x6d,0x51,0x7f] + vcvttpd2qqs 2032(%ecx), %xmm2 {%k7} {z} + +// CHECK: vcvttpd2qqs -1024(%edx){1to2}, %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfd,0x9f,0x6d,0x52,0x80] + vcvttpd2qqs -1024(%edx){1to2}, %xmm2 {%k7} {z} + +// CHECK: 
vcvttpd2qqs 268435456(%esp,%esi,8), %ymm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x28,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttpd2qqs 268435456(%esp,%esi,8), %ymm2 + +// CHECK: vcvttpd2qqs 291(%edi,%eax,4), %ymm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfd,0x2f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttpd2qqs 291(%edi,%eax,4), %ymm2 {%k7} + +// CHECK: vcvttpd2qqs (%eax){1to4}, %ymm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x38,0x6d,0x10] + vcvttpd2qqs (%eax){1to4}, %ymm2 + +// CHECK: vcvttpd2qqs -1024(,%ebp,2), %ymm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x28,0x6d,0x14,0x6d,0x00,0xfc,0xff,0xff] + vcvttpd2qqs -1024(,%ebp,2), %ymm2 + +// CHECK: vcvttpd2qqs 4064(%ecx), %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfd,0xaf,0x6d,0x51,0x7f] + vcvttpd2qqs 4064(%ecx), %ymm2 {%k7} {z} + +// CHECK: vcvttpd2qqs -1024(%edx){1to4}, %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfd,0xbf,0x6d,0x52,0x80] + vcvttpd2qqs -1024(%edx){1to4}, %ymm2 {%k7} {z} + +// CHECK: vcvttpd2qqs 268435456(%esp,%esi,8), %zmm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x48,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttpd2qqs 268435456(%esp,%esi,8), %zmm2 + +// CHECK: vcvttpd2qqs 291(%edi,%eax,4), %zmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfd,0x4f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttpd2qqs 291(%edi,%eax,4), %zmm2 {%k7} + +// CHECK: vcvttpd2qqs (%eax){1to8}, %zmm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x58,0x6d,0x10] + vcvttpd2qqs (%eax){1to8}, %zmm2 + +// CHECK: vcvttpd2qqs -2048(,%ebp,2), %zmm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x48,0x6d,0x14,0x6d,0x00,0xf8,0xff,0xff] + vcvttpd2qqs -2048(,%ebp,2), %zmm2 + +// CHECK: vcvttpd2qqs 8128(%ecx), %zmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfd,0xcf,0x6d,0x51,0x7f] + vcvttpd2qqs 8128(%ecx), %zmm2 {%k7} {z} + +// CHECK: vcvttpd2qqs -1024(%edx){1to8}, %zmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfd,0xdf,0x6d,0x52,0x80] + vcvttpd2qqs -1024(%edx){1to8}, %zmm2 {%k7} {z} + +// CHECK: vcvttpd2udqs %xmm3, %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x08,0x6c,0xd3] + vcvttpd2udqs %xmm3, %xmm2 + +// CHECK: vcvttpd2udqs %xmm3, %xmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfc,0x0f,0x6c,0xd3] + vcvttpd2udqs %xmm3, %xmm2 {%k7} + +// CHECK: vcvttpd2udqs %xmm3, %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfc,0x8f,0x6c,0xd3] + vcvttpd2udqs %xmm3, %xmm2 {%k7} {z} + +// CHECK: vcvttpd2udqs %ymm3, %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x28,0x6c,0xd3] + vcvttpd2udqs %ymm3, %xmm2 + +// CHECK: vcvttpd2udqs {sae}, %ymm3, %xmm2 +// CHECK: encoding: [0x62,0xf5,0xf8,0x18,0x6c,0xd3] + vcvttpd2udqs {sae}, %ymm3, %xmm2 + +// CHECK: vcvttpd2udqs %ymm3, %xmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfc,0x2f,0x6c,0xd3] + vcvttpd2udqs %ymm3, %xmm2 {%k7} + +// CHECK: vcvttpd2udqs {sae}, %ymm3, %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xf8,0x9f,0x6c,0xd3] + vcvttpd2udqs {sae}, %ymm3, %xmm2 {%k7} {z} + +// CHECK: vcvttpd2udqs %zmm3, %ymm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x48,0x6c,0xd3] + vcvttpd2udqs %zmm3, %ymm2 + +// CHECK: vcvttpd2udqs {sae}, %zmm3, %ymm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x18,0x6c,0xd3] + vcvttpd2udqs {sae}, %zmm3, %ymm2 + +// CHECK: vcvttpd2udqs %zmm3, %ymm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfc,0x4f,0x6c,0xd3] + vcvttpd2udqs %zmm3, %ymm2 {%k7} + +// CHECK: vcvttpd2udqs {sae}, %zmm3, %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfc,0x9f,0x6c,0xd3] + vcvttpd2udqs {sae}, %zmm3, %ymm2 {%k7} {z} + +// CHECK: vcvttpd2udqsx 268435456(%esp,%esi,8), %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x08,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttpd2udqsx 
268435456(%esp,%esi,8), %xmm2 + +// CHECK: vcvttpd2udqsx 291(%edi,%eax,4), %xmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfc,0x0f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttpd2udqsx 291(%edi,%eax,4), %xmm2 {%k7} + +// CHECK: vcvttpd2udqs (%eax){1to2}, %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x18,0x6c,0x10] + vcvttpd2udqs (%eax){1to2}, %xmm2 + +// CHECK: vcvttpd2udqsx -512(,%ebp,2), %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x08,0x6c,0x14,0x6d,0x00,0xfe,0xff,0xff] + vcvttpd2udqsx -512(,%ebp,2), %xmm2 + +// CHECK: vcvttpd2udqsx 2032(%ecx), %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfc,0x8f,0x6c,0x51,0x7f] + vcvttpd2udqsx 2032(%ecx), %xmm2 {%k7} {z} + +// CHECK: vcvttpd2udqs -1024(%edx){1to2}, %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfc,0x9f,0x6c,0x52,0x80] + vcvttpd2udqs -1024(%edx){1to2}, %xmm2 {%k7} {z} + +// CHECK: vcvttpd2udqs (%eax){1to4}, %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x38,0x6c,0x10] + vcvttpd2udqs (%eax){1to4}, %xmm2 + +// CHECK: vcvttpd2udqsy -1024(,%ebp,2), %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x28,0x6c,0x14,0x6d,0x00,0xfc,0xff,0xff] + vcvttpd2udqsy -1024(,%ebp,2), %xmm2 + +// CHECK: vcvttpd2udqsy 4064(%ecx), %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfc,0xaf,0x6c,0x51,0x7f] + vcvttpd2udqsy 4064(%ecx), %xmm2 {%k7} {z} + +// CHECK: vcvttpd2udqs -1024(%edx){1to4}, %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfc,0xbf,0x6c,0x52,0x80] + vcvttpd2udqs -1024(%edx){1to4}, %xmm2 {%k7} {z} + +// CHECK: vcvttpd2udqs 268435456(%esp,%esi,8), %ymm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x48,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttpd2udqs 268435456(%esp,%esi,8), %ymm2 + +// CHECK: vcvttpd2udqs 291(%edi,%eax,4), %ymm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfc,0x4f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttpd2udqs 291(%edi,%eax,4), %ymm2 {%k7} + +// CHECK: vcvttpd2udqs (%eax){1to8}, %ymm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x58,0x6c,0x10] + vcvttpd2udqs (%eax){1to8}, %ymm2 + +// CHECK: vcvttpd2udqs -2048(,%ebp,2), %ymm2 +// CHECK: encoding: [0x62,0xf5,0xfc,0x48,0x6c,0x14,0x6d,0x00,0xf8,0xff,0xff] + vcvttpd2udqs -2048(,%ebp,2), %ymm2 + +// CHECK: vcvttpd2udqs 8128(%ecx), %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfc,0xcf,0x6c,0x51,0x7f] + vcvttpd2udqs 8128(%ecx), %ymm2 {%k7} {z} + +// CHECK: vcvttpd2udqs -1024(%edx){1to8}, %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfc,0xdf,0x6c,0x52,0x80] + vcvttpd2udqs -1024(%edx){1to8}, %ymm2 {%k7} {z} + +// CHECK: vcvttpd2uqqs %xmm3, %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x08,0x6c,0xd3] + vcvttpd2uqqs %xmm3, %xmm2 + +// CHECK: vcvttpd2uqqs %xmm3, %xmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfd,0x0f,0x6c,0xd3] + vcvttpd2uqqs %xmm3, %xmm2 {%k7} + +// CHECK: vcvttpd2uqqs %xmm3, %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfd,0x8f,0x6c,0xd3] + vcvttpd2uqqs %xmm3, %xmm2 {%k7} {z} + +// CHECK: vcvttpd2uqqs %ymm3, %ymm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x28,0x6c,0xd3] + vcvttpd2uqqs %ymm3, %ymm2 + +// CHECK: vcvttpd2uqqs {sae}, %ymm3, %ymm2 +// CHECK: encoding: [0x62,0xf5,0xf9,0x18,0x6c,0xd3] + vcvttpd2uqqs {sae}, %ymm3, %ymm2 + +// CHECK: vcvttpd2uqqs %ymm3, %ymm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfd,0x2f,0x6c,0xd3] + vcvttpd2uqqs %ymm3, %ymm2 {%k7} + +// CHECK: vcvttpd2uqqs {sae}, %ymm3, %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xf9,0x9f,0x6c,0xd3] + vcvttpd2uqqs {sae}, %ymm3, %ymm2 {%k7} {z} + +// CHECK: vcvttpd2uqqs %zmm3, %zmm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x48,0x6c,0xd3] + vcvttpd2uqqs %zmm3, %zmm2 + +// CHECK: vcvttpd2uqqs {sae}, %zmm3, %zmm2 +// CHECK: 
encoding: [0x62,0xf5,0xfd,0x18,0x6c,0xd3] + vcvttpd2uqqs {sae}, %zmm3, %zmm2 + +// CHECK: vcvttpd2uqqs %zmm3, %zmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfd,0x4f,0x6c,0xd3] + vcvttpd2uqqs %zmm3, %zmm2 {%k7} + +// CHECK: vcvttpd2uqqs {sae}, %zmm3, %zmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfd,0x9f,0x6c,0xd3] + vcvttpd2uqqs {sae}, %zmm3, %zmm2 {%k7} {z} + +// CHECK: vcvttpd2uqqs 268435456(%esp,%esi,8), %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x08,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttpd2uqqs 268435456(%esp,%esi,8), %xmm2 + +// CHECK: vcvttpd2uqqs 291(%edi,%eax,4), %xmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfd,0x0f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttpd2uqqs 291(%edi,%eax,4), %xmm2 {%k7} + +// CHECK: vcvttpd2uqqs (%eax){1to2}, %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x18,0x6c,0x10] + vcvttpd2uqqs (%eax){1to2}, %xmm2 + +// CHECK: vcvttpd2uqqs -512(,%ebp,2), %xmm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x08,0x6c,0x14,0x6d,0x00,0xfe,0xff,0xff] + vcvttpd2uqqs -512(,%ebp,2), %xmm2 + +// CHECK: vcvttpd2uqqs 2032(%ecx), %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfd,0x8f,0x6c,0x51,0x7f] + vcvttpd2uqqs 2032(%ecx), %xmm2 {%k7} {z} + +// CHECK: vcvttpd2uqqs -1024(%edx){1to2}, %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfd,0x9f,0x6c,0x52,0x80] + vcvttpd2uqqs -1024(%edx){1to2}, %xmm2 {%k7} {z} + +// CHECK: vcvttpd2uqqs 268435456(%esp,%esi,8), %ymm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x28,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttpd2uqqs 268435456(%esp,%esi,8), %ymm2 + +// CHECK: vcvttpd2uqqs 291(%edi,%eax,4), %ymm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfd,0x2f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttpd2uqqs 291(%edi,%eax,4), %ymm2 {%k7} + +// CHECK: vcvttpd2uqqs (%eax){1to4}, %ymm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x38,0x6c,0x10] + vcvttpd2uqqs (%eax){1to4}, %ymm2 + +// CHECK: vcvttpd2uqqs -1024(,%ebp,2), %ymm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x28,0x6c,0x14,0x6d,0x00,0xfc,0xff,0xff] + vcvttpd2uqqs -1024(,%ebp,2), %ymm2 + +// CHECK: vcvttpd2uqqs 4064(%ecx), %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfd,0xaf,0x6c,0x51,0x7f] + vcvttpd2uqqs 4064(%ecx), %ymm2 {%k7} {z} + +// CHECK: vcvttpd2uqqs -1024(%edx){1to4}, %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfd,0xbf,0x6c,0x52,0x80] + vcvttpd2uqqs -1024(%edx){1to4}, %ymm2 {%k7} {z} + +// CHECK: vcvttpd2uqqs 268435456(%esp,%esi,8), %zmm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x48,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttpd2uqqs 268435456(%esp,%esi,8), %zmm2 + +// CHECK: vcvttpd2uqqs 291(%edi,%eax,4), %zmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0xfd,0x4f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttpd2uqqs 291(%edi,%eax,4), %zmm2 {%k7} + +// CHECK: vcvttpd2uqqs (%eax){1to8}, %zmm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x58,0x6c,0x10] + vcvttpd2uqqs (%eax){1to8}, %zmm2 + +// CHECK: vcvttpd2uqqs -2048(,%ebp,2), %zmm2 +// CHECK: encoding: [0x62,0xf5,0xfd,0x48,0x6c,0x14,0x6d,0x00,0xf8,0xff,0xff] + vcvttpd2uqqs -2048(,%ebp,2), %zmm2 + +// CHECK: vcvttpd2uqqs 8128(%ecx), %zmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfd,0xcf,0x6c,0x51,0x7f] + vcvttpd2uqqs 8128(%ecx), %zmm2 {%k7} {z} + +// CHECK: vcvttpd2uqqs -1024(%edx){1to8}, %zmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0xfd,0xdf,0x6c,0x52,0x80] + vcvttpd2uqqs -1024(%edx){1to8}, %zmm2 {%k7} {z} + +// CHECK: vcvttps2dqs %xmm3, %xmm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6d,0xd3] + vcvttps2dqs %xmm3, %xmm2 + +// CHECK: vcvttps2dqs %xmm3, %xmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x6d,0xd3] + vcvttps2dqs %xmm3, %xmm2 {%k7} + +// 
CHECK: vcvttps2dqs %xmm3, %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x6d,0xd3] + vcvttps2dqs %xmm3, %xmm2 {%k7} {z} + +// CHECK: vcvttps2dqs %ymm3, %ymm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6d,0xd3] + vcvttps2dqs %ymm3, %ymm2 + +// CHECK: vcvttps2dqs {sae}, %ymm3, %ymm2 +// CHECK: encoding: [0x62,0xf5,0x78,0x18,0x6d,0xd3] + vcvttps2dqs {sae}, %ymm3, %ymm2 + +// CHECK: vcvttps2dqs %ymm3, %ymm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x6d,0xd3] + vcvttps2dqs %ymm3, %ymm2 {%k7} + +// CHECK: vcvttps2dqs {sae}, %ymm3, %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x78,0x9f,0x6d,0xd3] + vcvttps2dqs {sae}, %ymm3, %ymm2 {%k7} {z} + +// CHECK: vcvttps2dqs %zmm3, %zmm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6d,0xd3] + vcvttps2dqs %zmm3, %zmm2 + +// CHECK: vcvttps2dqs {sae}, %zmm3, %zmm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x6d,0xd3] + vcvttps2dqs {sae}, %zmm3, %zmm2 + +// CHECK: vcvttps2dqs %zmm3, %zmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x6d,0xd3] + vcvttps2dqs %zmm3, %zmm2 {%k7} + +// CHECK: vcvttps2dqs {sae}, %zmm3, %zmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7c,0x9f,0x6d,0xd3] + vcvttps2dqs {sae}, %zmm3, %zmm2 {%k7} {z} + +// CHECK: vcvttps2dqs 268435456(%esp,%esi,8), %xmm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2dqs 268435456(%esp,%esi,8), %xmm2 + +// CHECK: vcvttps2dqs 291(%edi,%eax,4), %xmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2dqs 291(%edi,%eax,4), %xmm2 {%k7} + +// CHECK: vcvttps2dqs (%eax){1to4}, %xmm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x6d,0x10] + vcvttps2dqs (%eax){1to4}, %xmm2 + +// CHECK: vcvttps2dqs -512(,%ebp,2), %xmm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6d,0x14,0x6d,0x00,0xfe,0xff,0xff] + vcvttps2dqs -512(,%ebp,2), %xmm2 + +// CHECK: vcvttps2dqs 2032(%ecx), %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x6d,0x51,0x7f] + vcvttps2dqs 2032(%ecx), %xmm2 {%k7} {z} + +// CHECK: vcvttps2dqs -512(%edx){1to4}, %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7c,0x9f,0x6d,0x52,0x80] + vcvttps2dqs -512(%edx){1to4}, %xmm2 {%k7} {z} + +// CHECK: vcvttps2dqs 268435456(%esp,%esi,8), %ymm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2dqs 268435456(%esp,%esi,8), %ymm2 + +// CHECK: vcvttps2dqs 291(%edi,%eax,4), %ymm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2dqs 291(%edi,%eax,4), %ymm2 {%k7} + +// CHECK: vcvttps2dqs (%eax){1to8}, %ymm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x38,0x6d,0x10] + vcvttps2dqs (%eax){1to8}, %ymm2 + +// CHECK: vcvttps2dqs -1024(,%ebp,2), %ymm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6d,0x14,0x6d,0x00,0xfc,0xff,0xff] + vcvttps2dqs -1024(,%ebp,2), %ymm2 + +// CHECK: vcvttps2dqs 4064(%ecx), %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7c,0xaf,0x6d,0x51,0x7f] + vcvttps2dqs 4064(%ecx), %ymm2 {%k7} {z} + +// CHECK: vcvttps2dqs -512(%edx){1to8}, %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7c,0xbf,0x6d,0x52,0x80] + vcvttps2dqs -512(%edx){1to8}, %ymm2 {%k7} {z} + +// CHECK: vcvttps2dqs 268435456(%esp,%esi,8), %zmm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2dqs 268435456(%esp,%esi,8), %zmm2 + +// CHECK: vcvttps2dqs 291(%edi,%eax,4), %zmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2dqs 291(%edi,%eax,4), %zmm2 {%k7} + +// CHECK: vcvttps2dqs (%eax){1to16}, %zmm2 +// CHECK: encoding: 
[0x62,0xf5,0x7c,0x58,0x6d,0x10] + vcvttps2dqs (%eax){1to16}, %zmm2 + +// CHECK: vcvttps2dqs -2048(,%ebp,2), %zmm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6d,0x14,0x6d,0x00,0xf8,0xff,0xff] + vcvttps2dqs -2048(,%ebp,2), %zmm2 + +// CHECK: vcvttps2dqs 8128(%ecx), %zmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7c,0xcf,0x6d,0x51,0x7f] + vcvttps2dqs 8128(%ecx), %zmm2 {%k7} {z} + +// CHECK: vcvttps2dqs -512(%edx){1to16}, %zmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7c,0xdf,0x6d,0x52,0x80] + vcvttps2dqs -512(%edx){1to16}, %zmm2 {%k7} {z} + +// CHECK: vcvttps2qqs %xmm3, %xmm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6d,0xd3] + vcvttps2qqs %xmm3, %xmm2 + +// CHECK: vcvttps2qqs %xmm3, %xmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x6d,0xd3] + vcvttps2qqs %xmm3, %xmm2 {%k7} + +// CHECK: vcvttps2qqs %xmm3, %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x6d,0xd3] + vcvttps2qqs %xmm3, %xmm2 {%k7} {z} + +// CHECK: vcvttps2qqs %xmm3, %ymm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6d,0xd3] + vcvttps2qqs %xmm3, %ymm2 + +// CHECK: vcvttps2qqs {sae}, %xmm3, %ymm2 +// CHECK: encoding: [0x62,0xf5,0x79,0x18,0x6d,0xd3] + vcvttps2qqs {sae}, %xmm3, %ymm2 + +// CHECK: vcvttps2qqs %xmm3, %ymm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x6d,0xd3] + vcvttps2qqs %xmm3, %ymm2 {%k7} + +// CHECK: vcvttps2qqs {sae}, %xmm3, %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x79,0x9f,0x6d,0xd3] + vcvttps2qqs {sae}, %xmm3, %ymm2 {%k7} {z} + +// CHECK: vcvttps2qqs %ymm3, %zmm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6d,0xd3] + vcvttps2qqs %ymm3, %zmm2 + +// CHECK: vcvttps2qqs {sae}, %ymm3, %zmm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x6d,0xd3] + vcvttps2qqs {sae}, %ymm3, %zmm2 + +// CHECK: vcvttps2qqs %ymm3, %zmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x6d,0xd3] + vcvttps2qqs %ymm3, %zmm2 {%k7} + +// CHECK: vcvttps2qqs {sae}, %ymm3, %zmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7d,0x9f,0x6d,0xd3] + vcvttps2qqs {sae}, %ymm3, %zmm2 {%k7} {z} + +// CHECK: vcvttps2qqs 268435456(%esp,%esi,8), %xmm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2qqs 268435456(%esp,%esi,8), %xmm2 + +// CHECK: vcvttps2qqs 291(%edi,%eax,4), %xmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2qqs 291(%edi,%eax,4), %xmm2 {%k7} + +// CHECK: vcvttps2qqs (%eax){1to2}, %xmm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x6d,0x10] + vcvttps2qqs (%eax){1to2}, %xmm2 + +// CHECK: vcvttps2qqs -256(,%ebp,2), %xmm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6d,0x14,0x6d,0x00,0xff,0xff,0xff] + vcvttps2qqs -256(,%ebp,2), %xmm2 + +// CHECK: vcvttps2qqs 1016(%ecx), %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x6d,0x51,0x7f] + vcvttps2qqs 1016(%ecx), %xmm2 {%k7} {z} + +// CHECK: vcvttps2qqs -512(%edx){1to2}, %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7d,0x9f,0x6d,0x52,0x80] + vcvttps2qqs -512(%edx){1to2}, %xmm2 {%k7} {z} + +// CHECK: vcvttps2qqs 268435456(%esp,%esi,8), %ymm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2qqs 268435456(%esp,%esi,8), %ymm2 + +// CHECK: vcvttps2qqs 291(%edi,%eax,4), %ymm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2qqs 291(%edi,%eax,4), %ymm2 {%k7} + +// CHECK: vcvttps2qqs (%eax){1to4}, %ymm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x38,0x6d,0x10] + vcvttps2qqs (%eax){1to4}, %ymm2 + +// CHECK: vcvttps2qqs -512(,%ebp,2), %ymm2 +// CHECK: encoding: 
[0x62,0xf5,0x7d,0x28,0x6d,0x14,0x6d,0x00,0xfe,0xff,0xff] + vcvttps2qqs -512(,%ebp,2), %ymm2 + +// CHECK: vcvttps2qqs 2032(%ecx), %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7d,0xaf,0x6d,0x51,0x7f] + vcvttps2qqs 2032(%ecx), %ymm2 {%k7} {z} + +// CHECK: vcvttps2qqs -512(%edx){1to4}, %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7d,0xbf,0x6d,0x52,0x80] + vcvttps2qqs -512(%edx){1to4}, %ymm2 {%k7} {z} + +// CHECK: vcvttps2qqs 268435456(%esp,%esi,8), %zmm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2qqs 268435456(%esp,%esi,8), %zmm2 + +// CHECK: vcvttps2qqs 291(%edi,%eax,4), %zmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2qqs 291(%edi,%eax,4), %zmm2 {%k7} + +// CHECK: vcvttps2qqs (%eax){1to8}, %zmm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x58,0x6d,0x10] + vcvttps2qqs (%eax){1to8}, %zmm2 + +// CHECK: vcvttps2qqs -1024(,%ebp,2), %zmm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6d,0x14,0x6d,0x00,0xfc,0xff,0xff] + vcvttps2qqs -1024(,%ebp,2), %zmm2 + +// CHECK: vcvttps2qqs 4064(%ecx), %zmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7d,0xcf,0x6d,0x51,0x7f] + vcvttps2qqs 4064(%ecx), %zmm2 {%k7} {z} + +// CHECK: vcvttps2qqs -512(%edx){1to8}, %zmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7d,0xdf,0x6d,0x52,0x80] + vcvttps2qqs -512(%edx){1to8}, %zmm2 {%k7} {z} + +// CHECK: vcvttps2udqs %xmm3, %xmm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6c,0xd3] + vcvttps2udqs %xmm3, %xmm2 + +// CHECK: vcvttps2udqs %xmm3, %xmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x6c,0xd3] + vcvttps2udqs %xmm3, %xmm2 {%k7} + +// CHECK: vcvttps2udqs %xmm3, %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x6c,0xd3] + vcvttps2udqs %xmm3, %xmm2 {%k7} {z} + +// CHECK: vcvttps2udqs %ymm3, %ymm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6c,0xd3] + vcvttps2udqs %ymm3, %ymm2 + +// CHECK: vcvttps2udqs {sae}, %ymm3, %ymm2 +// CHECK: encoding: [0x62,0xf5,0x78,0x18,0x6c,0xd3] + vcvttps2udqs {sae}, %ymm3, %ymm2 + +// CHECK: vcvttps2udqs %ymm3, %ymm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x6c,0xd3] + vcvttps2udqs %ymm3, %ymm2 {%k7} + +// CHECK: vcvttps2udqs {sae}, %ymm3, %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x78,0x9f,0x6c,0xd3] + vcvttps2udqs {sae}, %ymm3, %ymm2 {%k7} {z} + +// CHECK: vcvttps2udqs %zmm3, %zmm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6c,0xd3] + vcvttps2udqs %zmm3, %zmm2 + +// CHECK: vcvttps2udqs {sae}, %zmm3, %zmm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x6c,0xd3] + vcvttps2udqs {sae}, %zmm3, %zmm2 + +// CHECK: vcvttps2udqs %zmm3, %zmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x6c,0xd3] + vcvttps2udqs %zmm3, %zmm2 {%k7} + +// CHECK: vcvttps2udqs {sae}, %zmm3, %zmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7c,0x9f,0x6c,0xd3] + vcvttps2udqs {sae}, %zmm3, %zmm2 {%k7} {z} + +// CHECK: vcvttps2udqs 268435456(%esp,%esi,8), %xmm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2udqs 268435456(%esp,%esi,8), %xmm2 + +// CHECK: vcvttps2udqs 291(%edi,%eax,4), %xmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2udqs 291(%edi,%eax,4), %xmm2 {%k7} + +// CHECK: vcvttps2udqs (%eax){1to4}, %xmm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x6c,0x10] + vcvttps2udqs (%eax){1to4}, %xmm2 + +// CHECK: vcvttps2udqs -512(,%ebp,2), %xmm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6c,0x14,0x6d,0x00,0xfe,0xff,0xff] + vcvttps2udqs -512(,%ebp,2), %xmm2 + +// CHECK: vcvttps2udqs 2032(%ecx), %xmm2 {%k7} {z} 
+// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x6c,0x51,0x7f] + vcvttps2udqs 2032(%ecx), %xmm2 {%k7} {z} + +// CHECK: vcvttps2udqs -512(%edx){1to4}, %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7c,0x9f,0x6c,0x52,0x80] + vcvttps2udqs -512(%edx){1to4}, %xmm2 {%k7} {z} + +// CHECK: vcvttps2udqs 268435456(%esp,%esi,8), %ymm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2udqs 268435456(%esp,%esi,8), %ymm2 + +// CHECK: vcvttps2udqs 291(%edi,%eax,4), %ymm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2udqs 291(%edi,%eax,4), %ymm2 {%k7} + +// CHECK: vcvttps2udqs (%eax){1to8}, %ymm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x38,0x6c,0x10] + vcvttps2udqs (%eax){1to8}, %ymm2 + +// CHECK: vcvttps2udqs -1024(,%ebp,2), %ymm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6c,0x14,0x6d,0x00,0xfc,0xff,0xff] + vcvttps2udqs -1024(,%ebp,2), %ymm2 + +// CHECK: vcvttps2udqs 4064(%ecx), %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7c,0xaf,0x6c,0x51,0x7f] + vcvttps2udqs 4064(%ecx), %ymm2 {%k7} {z} + +// CHECK: vcvttps2udqs -512(%edx){1to8}, %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7c,0xbf,0x6c,0x52,0x80] + vcvttps2udqs -512(%edx){1to8}, %ymm2 {%k7} {z} + +// CHECK: vcvttps2udqs 268435456(%esp,%esi,8), %zmm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2udqs 268435456(%esp,%esi,8), %zmm2 + +// CHECK: vcvttps2udqs 291(%edi,%eax,4), %zmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2udqs 291(%edi,%eax,4), %zmm2 {%k7} + +// CHECK: vcvttps2udqs (%eax){1to16}, %zmm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x58,0x6c,0x10] + vcvttps2udqs (%eax){1to16}, %zmm2 + +// CHECK: vcvttps2udqs -2048(,%ebp,2), %zmm2 +// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6c,0x14,0x6d,0x00,0xf8,0xff,0xff] + vcvttps2udqs -2048(,%ebp,2), %zmm2 + +// CHECK: vcvttps2udqs 8128(%ecx), %zmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7c,0xcf,0x6c,0x51,0x7f] + vcvttps2udqs 8128(%ecx), %zmm2 {%k7} {z} + +// CHECK: vcvttps2udqs -512(%edx){1to16}, %zmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7c,0xdf,0x6c,0x52,0x80] + vcvttps2udqs -512(%edx){1to16}, %zmm2 {%k7} {z} + +// CHECK: vcvttps2uqqs %xmm3, %xmm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6c,0xd3] + vcvttps2uqqs %xmm3, %xmm2 + +// CHECK: vcvttps2uqqs %xmm3, %xmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x6c,0xd3] + vcvttps2uqqs %xmm3, %xmm2 {%k7} + +// CHECK: vcvttps2uqqs %xmm3, %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x6c,0xd3] + vcvttps2uqqs %xmm3, %xmm2 {%k7} {z} + +// CHECK: vcvttps2uqqs %xmm3, %ymm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6c,0xd3] + vcvttps2uqqs %xmm3, %ymm2 + +// CHECK: vcvttps2uqqs {sae}, %xmm3, %ymm2 +// CHECK: encoding: [0x62,0xf5,0x79,0x18,0x6c,0xd3] + vcvttps2uqqs {sae}, %xmm3, %ymm2 + +// CHECK: vcvttps2uqqs %xmm3, %ymm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x6c,0xd3] + vcvttps2uqqs %xmm3, %ymm2 {%k7} + +// CHECK: vcvttps2uqqs {sae}, %xmm3, %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x79,0x9f,0x6c,0xd3] + vcvttps2uqqs {sae}, %xmm3, %ymm2 {%k7} {z} + +// CHECK: vcvttps2uqqs %ymm3, %zmm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6c,0xd3] + vcvttps2uqqs %ymm3, %zmm2 + +// CHECK: vcvttps2uqqs {sae}, %ymm3, %zmm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x6c,0xd3] + vcvttps2uqqs {sae}, %ymm3, %zmm2 + +// CHECK: vcvttps2uqqs %ymm3, %zmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x6c,0xd3] + vcvttps2uqqs %ymm3, %zmm2 {%k7} + +// 
CHECK: vcvttps2uqqs {sae}, %ymm3, %zmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7d,0x9f,0x6c,0xd3] + vcvttps2uqqs {sae}, %ymm3, %zmm2 {%k7} {z} + +// CHECK: vcvttps2uqqs 268435456(%esp,%esi,8), %xmm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2uqqs 268435456(%esp,%esi,8), %xmm2 + +// CHECK: vcvttps2uqqs 291(%edi,%eax,4), %xmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2uqqs 291(%edi,%eax,4), %xmm2 {%k7} + +// CHECK: vcvttps2uqqs (%eax){1to2}, %xmm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x6c,0x10] + vcvttps2uqqs (%eax){1to2}, %xmm2 + +// CHECK: vcvttps2uqqs -256(,%ebp,2), %xmm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6c,0x14,0x6d,0x00,0xff,0xff,0xff] + vcvttps2uqqs -256(,%ebp,2), %xmm2 + +// CHECK: vcvttps2uqqs 1016(%ecx), %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x6c,0x51,0x7f] + vcvttps2uqqs 1016(%ecx), %xmm2 {%k7} {z} + +// CHECK: vcvttps2uqqs -512(%edx){1to2}, %xmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7d,0x9f,0x6c,0x52,0x80] + vcvttps2uqqs -512(%edx){1to2}, %xmm2 {%k7} {z} + +// CHECK: vcvttps2uqqs 268435456(%esp,%esi,8), %ymm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2uqqs 268435456(%esp,%esi,8), %ymm2 + +// CHECK: vcvttps2uqqs 291(%edi,%eax,4), %ymm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2uqqs 291(%edi,%eax,4), %ymm2 {%k7} + +// CHECK: vcvttps2uqqs (%eax){1to4}, %ymm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x38,0x6c,0x10] + vcvttps2uqqs (%eax){1to4}, %ymm2 + +// CHECK: vcvttps2uqqs -512(,%ebp,2), %ymm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6c,0x14,0x6d,0x00,0xfe,0xff,0xff] + vcvttps2uqqs -512(,%ebp,2), %ymm2 + +// CHECK: vcvttps2uqqs 2032(%ecx), %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7d,0xaf,0x6c,0x51,0x7f] + vcvttps2uqqs 2032(%ecx), %ymm2 {%k7} {z} + +// CHECK: vcvttps2uqqs -512(%edx){1to4}, %ymm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7d,0xbf,0x6c,0x52,0x80] + vcvttps2uqqs -512(%edx){1to4}, %ymm2 {%k7} {z} + +// CHECK: vcvttps2uqqs 268435456(%esp,%esi,8), %zmm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2uqqs 268435456(%esp,%esi,8), %zmm2 + +// CHECK: vcvttps2uqqs 291(%edi,%eax,4), %zmm2 {%k7} +// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2uqqs 291(%edi,%eax,4), %zmm2 {%k7} + +// CHECK: vcvttps2uqqs (%eax){1to8}, %zmm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x58,0x6c,0x10] + vcvttps2uqqs (%eax){1to8}, %zmm2 + +// CHECK: vcvttps2uqqs -1024(,%ebp,2), %zmm2 +// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6c,0x14,0x6d,0x00,0xfc,0xff,0xff] + vcvttps2uqqs -1024(,%ebp,2), %zmm2 + +// CHECK: vcvttps2uqqs 4064(%ecx), %zmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7d,0xcf,0x6c,0x51,0x7f] + vcvttps2uqqs 4064(%ecx), %zmm2 {%k7} {z} + +// CHECK: vcvttps2uqqs -512(%edx){1to8}, %zmm2 {%k7} {z} +// CHECK: encoding: [0x62,0xf5,0x7d,0xdf,0x6c,0x52,0x80] + vcvttps2uqqs -512(%edx){1to8}, %zmm2 {%k7} {z} + diff --git a/llvm/test/MC/X86/avx10_2satcvtds-32-intel.s b/llvm/test/MC/X86/avx10_2satcvtds-32-intel.s new file mode 100644 index 0000000..37a090d --- /dev/null +++ b/llvm/test/MC/X86/avx10_2satcvtds-32-intel.s @@ -0,0 +1,1042 @@ +// RUN: llvm-mc -triple i386 -x86-asm-syntax=intel -output-asm-variant=1 --show-encoding %s | FileCheck %s + +// CHECK: vcvttsd2sis ecx, xmm2 +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0xca] + vcvttsd2sis ecx, xmm2 + +// CHECK: vcvttsd2sis 
ecx, xmm2, {sae} +// CHECK: encoding: [0x62,0xf5,0x7f,0x18,0x6d,0xca] + vcvttsd2sis ecx, xmm2, {sae} + +// CHECK: vcvttsd2sis ecx, qword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0x8c,0xf4,0x00,0x00,0x00,0x10] + vcvttsd2sis ecx, qword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttsd2sis ecx, qword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0x8c,0x87,0x23,0x01,0x00,0x00] + vcvttsd2sis ecx, qword ptr [edi + 4*eax + 291] + +// CHECK: vcvttsd2sis ecx, qword ptr [eax] +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0x08] + vcvttsd2sis ecx, qword ptr [eax] + +// CHECK: vcvttsd2sis ecx, qword ptr [2*ebp - 256] +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0x0c,0x6d,0x00,0xff,0xff,0xff] + vcvttsd2sis ecx, qword ptr [2*ebp - 256] + +// CHECK: vcvttsd2sis ecx, qword ptr [ecx + 1016] +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0x49,0x7f] + vcvttsd2sis ecx, qword ptr [ecx + 1016] + +// CHECK: vcvttsd2sis ecx, qword ptr [edx - 1024] +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0x4a,0x80] + vcvttsd2sis ecx, qword ptr [edx - 1024] + +// CHECK: vcvttsd2usis ecx, xmm2 +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0xca] + vcvttsd2usis ecx, xmm2 + +// CHECK: vcvttsd2usis ecx, xmm2, {sae} +// CHECK: encoding: [0x62,0xf5,0x7f,0x18,0x6c,0xca] + vcvttsd2usis ecx, xmm2, {sae} + +// CHECK: vcvttsd2usis ecx, qword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0x8c,0xf4,0x00,0x00,0x00,0x10] + vcvttsd2usis ecx, qword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttsd2usis ecx, qword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0x8c,0x87,0x23,0x01,0x00,0x00] + vcvttsd2usis ecx, qword ptr [edi + 4*eax + 291] + +// CHECK: vcvttsd2usis ecx, qword ptr [eax] +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0x08] + vcvttsd2usis ecx, qword ptr [eax] + +// CHECK: vcvttsd2usis ecx, qword ptr [2*ebp - 256] +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0x0c,0x6d,0x00,0xff,0xff,0xff] + vcvttsd2usis ecx, qword ptr [2*ebp - 256] + +// CHECK: vcvttsd2usis ecx, qword ptr [ecx + 1016] +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0x49,0x7f] + vcvttsd2usis ecx, qword ptr [ecx + 1016] + +// CHECK: vcvttsd2usis ecx, qword ptr [edx - 1024] +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0x4a,0x80] + vcvttsd2usis ecx, qword ptr [edx - 1024] + +// CHECK: vcvttss2sis ecx, xmm2 +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0xca] + vcvttss2sis ecx, xmm2 + +// CHECK: vcvttss2sis ecx, xmm2, {sae} +// CHECK: encoding: [0x62,0xf5,0x7e,0x18,0x6d,0xca] + vcvttss2sis ecx, xmm2, {sae} + +// CHECK: vcvttss2sis ecx, dword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x8c,0xf4,0x00,0x00,0x00,0x10] + vcvttss2sis ecx, dword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttss2sis ecx, dword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x8c,0x87,0x23,0x01,0x00,0x00] + vcvttss2sis ecx, dword ptr [edi + 4*eax + 291] + +// CHECK: vcvttss2sis ecx, dword ptr [eax] +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x08] + vcvttss2sis ecx, dword ptr [eax] + +// CHECK: vcvttss2sis ecx, dword ptr [2*ebp - 128] +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x0c,0x6d,0x80,0xff,0xff,0xff] + vcvttss2sis ecx, dword ptr [2*ebp - 128] + +// CHECK: vcvttss2sis ecx, dword ptr [ecx + 508] +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x49,0x7f] + vcvttss2sis ecx, dword ptr [ecx + 508] + +// CHECK: vcvttss2sis ecx, dword ptr [edx - 512] +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x4a,0x80] + 
vcvttss2sis ecx, dword ptr [edx - 512] + +// CHECK: vcvttss2usis ecx, xmm2 +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0xca] + vcvttss2usis ecx, xmm2 + +// CHECK: vcvttss2usis ecx, xmm2, {sae} +// CHECK: encoding: [0x62,0xf5,0x7e,0x18,0x6c,0xca] + vcvttss2usis ecx, xmm2, {sae} + +// CHECK: vcvttss2usis ecx, dword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0x8c,0xf4,0x00,0x00,0x00,0x10] + vcvttss2usis ecx, dword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttss2usis ecx, dword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0x8c,0x87,0x23,0x01,0x00,0x00] + vcvttss2usis ecx, dword ptr [edi + 4*eax + 291] + +// CHECK: vcvttss2usis ecx, dword ptr [eax] +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0x08] + vcvttss2usis ecx, dword ptr [eax] + +// CHECK: vcvttss2usis ecx, dword ptr [2*ebp - 128] +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0x0c,0x6d,0x80,0xff,0xff,0xff] + vcvttss2usis ecx, dword ptr [2*ebp - 128] + +// CHECK: vcvttss2usis ecx, dword ptr [ecx + 508] +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0x49,0x7f] + vcvttss2usis ecx, dword ptr [ecx + 508] + +// CHECK: vcvttss2usis ecx, dword ptr [edx - 512] +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0x4a,0x80] + vcvttss2usis ecx, dword ptr [edx - 512] + +// CHECK: vcvttpd2dqs xmm2, xmm3 +// CHECK: encoding: [0x62,0xf5,0xfc,0x08,0x6d,0xd3] + vcvttpd2dqs xmm2, xmm3 + +// CHECK: vcvttpd2dqs xmm2 {k7}, xmm3 +// CHECK: encoding: [0x62,0xf5,0xfc,0x0f,0x6d,0xd3] + vcvttpd2dqs xmm2 {k7}, xmm3 + +// CHECK: vcvttpd2dqs xmm2 {k7} {z}, xmm3 +// CHECK: encoding: [0x62,0xf5,0xfc,0x8f,0x6d,0xd3] + vcvttpd2dqs xmm2 {k7} {z}, xmm3 + +// CHECK: vcvttpd2dqs xmm2, ymm3 +// CHECK: encoding: [0x62,0xf5,0xfc,0x28,0x6d,0xd3] + vcvttpd2dqs xmm2, ymm3 + +// CHECK: vcvttpd2dqs xmm2, ymm3, {sae} +// CHECK: encoding: [0x62,0xf5,0xf8,0x18,0x6d,0xd3] + vcvttpd2dqs xmm2, ymm3, {sae} + +// CHECK: vcvttpd2dqs xmm2 {k7}, ymm3 +// CHECK: encoding: [0x62,0xf5,0xfc,0x2f,0x6d,0xd3] + vcvttpd2dqs xmm2 {k7}, ymm3 + +// CHECK: vcvttpd2dqs xmm2 {k7} {z}, ymm3, {sae} +// CHECK: encoding: [0x62,0xf5,0xf8,0x9f,0x6d,0xd3] + vcvttpd2dqs xmm2 {k7} {z}, ymm3, {sae} + +// CHECK: vcvttpd2dqs ymm2, zmm3 +// CHECK: encoding: [0x62,0xf5,0xfc,0x48,0x6d,0xd3] + vcvttpd2dqs ymm2, zmm3 + +// CHECK: vcvttpd2dqs ymm2, zmm3, {sae} +// CHECK: encoding: [0x62,0xf5,0xfc,0x18,0x6d,0xd3] + vcvttpd2dqs ymm2, zmm3, {sae} + +// CHECK: vcvttpd2dqs ymm2 {k7}, zmm3 +// CHECK: encoding: [0x62,0xf5,0xfc,0x4f,0x6d,0xd3] + vcvttpd2dqs ymm2 {k7}, zmm3 + +// CHECK: vcvttpd2dqs ymm2 {k7} {z}, zmm3, {sae} +// CHECK: encoding: [0x62,0xf5,0xfc,0x9f,0x6d,0xd3] + vcvttpd2dqs ymm2 {k7} {z}, zmm3, {sae} + +// CHECK: vcvttpd2dqs xmm2, xmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0xfc,0x08,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttpd2dqs xmm2, xmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttpd2dqs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0xfc,0x0f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttpd2dqs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291] + +// CHECK: vcvttpd2dqs xmm2, qword ptr [eax]{1to2} +// CHECK: encoding: [0x62,0xf5,0xfc,0x18,0x6d,0x10] + vcvttpd2dqs xmm2, qword ptr [eax]{1to2} + +// CHECK: vcvttpd2dqs xmm2, xmmword ptr [2*ebp - 512] +// CHECK: encoding: [0x62,0xf5,0xfc,0x08,0x6d,0x14,0x6d,0x00,0xfe,0xff,0xff] + vcvttpd2dqs xmm2, xmmword ptr [2*ebp - 512] + +// CHECK: vcvttpd2dqs xmm2 {k7} {z}, xmmword ptr [ecx + 2032] +// CHECK: encoding: [0x62,0xf5,0xfc,0x8f,0x6d,0x51,0x7f] + 
vcvttpd2dqs xmm2 {k7} {z}, xmmword ptr [ecx + 2032] + +// CHECK: vcvttpd2dqs xmm2 {k7} {z}, qword ptr [edx - 1024]{1to2} +// CHECK: encoding: [0x62,0xf5,0xfc,0x9f,0x6d,0x52,0x80] + vcvttpd2dqs xmm2 {k7} {z}, qword ptr [edx - 1024]{1to2} + +// CHECK: vcvttpd2dqs xmm2, qword ptr [eax]{1to4} +// CHECK: encoding: [0x62,0xf5,0xfc,0x38,0x6d,0x10] + vcvttpd2dqs xmm2, qword ptr [eax]{1to4} + +// CHECK: vcvttpd2dqs xmm2, ymmword ptr [2*ebp - 1024] +// CHECK: encoding: [0x62,0xf5,0xfc,0x28,0x6d,0x14,0x6d,0x00,0xfc,0xff,0xff] + vcvttpd2dqs xmm2, ymmword ptr [2*ebp - 1024] + +// CHECK: vcvttpd2dqs xmm2 {k7} {z}, ymmword ptr [ecx + 4064] +// CHECK: encoding: [0x62,0xf5,0xfc,0xaf,0x6d,0x51,0x7f] + vcvttpd2dqs xmm2 {k7} {z}, ymmword ptr [ecx + 4064] + +// CHECK: vcvttpd2dqs xmm2 {k7} {z}, qword ptr [edx - 1024]{1to4} +// CHECK: encoding: [0x62,0xf5,0xfc,0xbf,0x6d,0x52,0x80] + vcvttpd2dqs xmm2 {k7} {z}, qword ptr [edx - 1024]{1to4} + +// CHECK: vcvttpd2dqs ymm2, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0xfc,0x48,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttpd2dqs ymm2, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttpd2dqs ymm2 {k7}, zmmword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0xfc,0x4f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttpd2dqs ymm2 {k7}, zmmword ptr [edi + 4*eax + 291] + +// CHECK: vcvttpd2dqs ymm2, qword ptr [eax]{1to8} +// CHECK: encoding: [0x62,0xf5,0xfc,0x58,0x6d,0x10] + vcvttpd2dqs ymm2, qword ptr [eax]{1to8} + +// CHECK: vcvttpd2dqs ymm2, zmmword ptr [2*ebp - 2048] +// CHECK: encoding: [0x62,0xf5,0xfc,0x48,0x6d,0x14,0x6d,0x00,0xf8,0xff,0xff] + vcvttpd2dqs ymm2, zmmword ptr [2*ebp - 2048] + +// CHECK: vcvttpd2dqs ymm2 {k7} {z}, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf5,0xfc,0xcf,0x6d,0x51,0x7f] + vcvttpd2dqs ymm2 {k7} {z}, zmmword ptr [ecx + 8128] + +// CHECK: vcvttpd2dqs ymm2 {k7} {z}, qword ptr [edx - 1024]{1to8} +// CHECK: encoding: [0x62,0xf5,0xfc,0xdf,0x6d,0x52,0x80] + vcvttpd2dqs ymm2 {k7} {z}, qword ptr [edx - 1024]{1to8} + +// CHECK: vcvttpd2qqs xmm2, xmm3 +// CHECK: encoding: [0x62,0xf5,0xfd,0x08,0x6d,0xd3] + vcvttpd2qqs xmm2, xmm3 + +// CHECK: vcvttpd2qqs xmm2 {k7}, xmm3 +// CHECK: encoding: [0x62,0xf5,0xfd,0x0f,0x6d,0xd3] + vcvttpd2qqs xmm2 {k7}, xmm3 + +// CHECK: vcvttpd2qqs xmm2 {k7} {z}, xmm3 +// CHECK: encoding: [0x62,0xf5,0xfd,0x8f,0x6d,0xd3] + vcvttpd2qqs xmm2 {k7} {z}, xmm3 + +// CHECK: vcvttpd2qqs ymm2, ymm3 +// CHECK: encoding: [0x62,0xf5,0xfd,0x28,0x6d,0xd3] + vcvttpd2qqs ymm2, ymm3 + +// CHECK: vcvttpd2qqs ymm2, ymm3, {sae} +// CHECK: encoding: [0x62,0xf5,0xf9,0x18,0x6d,0xd3] + vcvttpd2qqs ymm2, ymm3, {sae} + +// CHECK: vcvttpd2qqs ymm2 {k7}, ymm3 +// CHECK: encoding: [0x62,0xf5,0xfd,0x2f,0x6d,0xd3] + vcvttpd2qqs ymm2 {k7}, ymm3 + +// CHECK: vcvttpd2qqs ymm2 {k7} {z}, ymm3, {sae} +// CHECK: encoding: [0x62,0xf5,0xf9,0x9f,0x6d,0xd3] + vcvttpd2qqs ymm2 {k7} {z}, ymm3, {sae} + +// CHECK: vcvttpd2qqs zmm2, zmm3 +// CHECK: encoding: [0x62,0xf5,0xfd,0x48,0x6d,0xd3] + vcvttpd2qqs zmm2, zmm3 + +// CHECK: vcvttpd2qqs zmm2, zmm3, {sae} +// CHECK: encoding: [0x62,0xf5,0xfd,0x18,0x6d,0xd3] + vcvttpd2qqs zmm2, zmm3, {sae} + +// CHECK: vcvttpd2qqs zmm2 {k7}, zmm3 +// CHECK: encoding: [0x62,0xf5,0xfd,0x4f,0x6d,0xd3] + vcvttpd2qqs zmm2 {k7}, zmm3 + +// CHECK: vcvttpd2qqs zmm2 {k7} {z}, zmm3, {sae} +// CHECK: encoding: [0x62,0xf5,0xfd,0x9f,0x6d,0xd3] + vcvttpd2qqs zmm2 {k7} {z}, zmm3, {sae} + +// CHECK: vcvttpd2qqs xmm2, xmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: 
[0x62,0xf5,0xfd,0x08,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttpd2qqs xmm2, xmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttpd2qqs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0xfd,0x0f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttpd2qqs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291] + +// CHECK: vcvttpd2qqs xmm2, qword ptr [eax]{1to2} +// CHECK: encoding: [0x62,0xf5,0xfd,0x18,0x6d,0x10] + vcvttpd2qqs xmm2, qword ptr [eax]{1to2} + +// CHECK: vcvttpd2qqs xmm2, xmmword ptr [2*ebp - 512] +// CHECK: encoding: [0x62,0xf5,0xfd,0x08,0x6d,0x14,0x6d,0x00,0xfe,0xff,0xff] + vcvttpd2qqs xmm2, xmmword ptr [2*ebp - 512] + +// CHECK: vcvttpd2qqs xmm2 {k7} {z}, xmmword ptr [ecx + 2032] +// CHECK: encoding: [0x62,0xf5,0xfd,0x8f,0x6d,0x51,0x7f] + vcvttpd2qqs xmm2 {k7} {z}, xmmword ptr [ecx + 2032] + +// CHECK: vcvttpd2qqs xmm2 {k7} {z}, qword ptr [edx - 1024]{1to2} +// CHECK: encoding: [0x62,0xf5,0xfd,0x9f,0x6d,0x52,0x80] + vcvttpd2qqs xmm2 {k7} {z}, qword ptr [edx - 1024]{1to2} + +// CHECK: vcvttpd2qqs ymm2, ymmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0xfd,0x28,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttpd2qqs ymm2, ymmword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttpd2qqs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0xfd,0x2f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttpd2qqs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291] + +// CHECK: vcvttpd2qqs ymm2, qword ptr [eax]{1to4} +// CHECK: encoding: [0x62,0xf5,0xfd,0x38,0x6d,0x10] + vcvttpd2qqs ymm2, qword ptr [eax]{1to4} + +// CHECK: vcvttpd2qqs ymm2, ymmword ptr [2*ebp - 1024] +// CHECK: encoding: [0x62,0xf5,0xfd,0x28,0x6d,0x14,0x6d,0x00,0xfc,0xff,0xff] + vcvttpd2qqs ymm2, ymmword ptr [2*ebp - 1024] + +// CHECK: vcvttpd2qqs ymm2 {k7} {z}, ymmword ptr [ecx + 4064] +// CHECK: encoding: [0x62,0xf5,0xfd,0xaf,0x6d,0x51,0x7f] + vcvttpd2qqs ymm2 {k7} {z}, ymmword ptr [ecx + 4064] + +// CHECK: vcvttpd2qqs ymm2 {k7} {z}, qword ptr [edx - 1024]{1to4} +// CHECK: encoding: [0x62,0xf5,0xfd,0xbf,0x6d,0x52,0x80] + vcvttpd2qqs ymm2 {k7} {z}, qword ptr [edx - 1024]{1to4} + +// CHECK: vcvttpd2qqs zmm2, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0xfd,0x48,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttpd2qqs zmm2, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttpd2qqs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0xfd,0x4f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttpd2qqs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291] + +// CHECK: vcvttpd2qqs zmm2, qword ptr [eax]{1to8} +// CHECK: encoding: [0x62,0xf5,0xfd,0x58,0x6d,0x10] + vcvttpd2qqs zmm2, qword ptr [eax]{1to8} + +// CHECK: vcvttpd2qqs zmm2, zmmword ptr [2*ebp - 2048] +// CHECK: encoding: [0x62,0xf5,0xfd,0x48,0x6d,0x14,0x6d,0x00,0xf8,0xff,0xff] + vcvttpd2qqs zmm2, zmmword ptr [2*ebp - 2048] + +// CHECK: vcvttpd2qqs zmm2 {k7} {z}, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf5,0xfd,0xcf,0x6d,0x51,0x7f] + vcvttpd2qqs zmm2 {k7} {z}, zmmword ptr [ecx + 8128] + +// CHECK: vcvttpd2qqs zmm2 {k7} {z}, qword ptr [edx - 1024]{1to8} +// CHECK: encoding: [0x62,0xf5,0xfd,0xdf,0x6d,0x52,0x80] + vcvttpd2qqs zmm2 {k7} {z}, qword ptr [edx - 1024]{1to8} + +// CHECK: vcvttpd2udqs xmm2, xmm3 +// CHECK: encoding: [0x62,0xf5,0xfc,0x08,0x6c,0xd3] + vcvttpd2udqs xmm2, xmm3 + +// CHECK: vcvttpd2udqs xmm2 {k7}, xmm3 +// CHECK: encoding: [0x62,0xf5,0xfc,0x0f,0x6c,0xd3] + vcvttpd2udqs xmm2 {k7}, xmm3 + +// CHECK: vcvttpd2udqs xmm2 {k7} {z}, xmm3 +// CHECK: encoding: [0x62,0xf5,0xfc,0x8f,0x6c,0xd3] + 
vcvttpd2udqs xmm2 {k7} {z}, xmm3 + +// CHECK: vcvttpd2udqs xmm2, ymm3 +// CHECK: encoding: [0x62,0xf5,0xfc,0x28,0x6c,0xd3] + vcvttpd2udqs xmm2, ymm3 + +// CHECK: vcvttpd2udqs xmm2, ymm3, {sae} +// CHECK: encoding: [0x62,0xf5,0xf8,0x18,0x6c,0xd3] + vcvttpd2udqs xmm2, ymm3, {sae} + +// CHECK: vcvttpd2udqs xmm2 {k7}, ymm3 +// CHECK: encoding: [0x62,0xf5,0xfc,0x2f,0x6c,0xd3] + vcvttpd2udqs xmm2 {k7}, ymm3 + +// CHECK: vcvttpd2udqs xmm2 {k7} {z}, ymm3, {sae} +// CHECK: encoding: [0x62,0xf5,0xf8,0x9f,0x6c,0xd3] + vcvttpd2udqs xmm2 {k7} {z}, ymm3, {sae} + +// CHECK: vcvttpd2udqs ymm2, zmm3 +// CHECK: encoding: [0x62,0xf5,0xfc,0x48,0x6c,0xd3] + vcvttpd2udqs ymm2, zmm3 + +// CHECK: vcvttpd2udqs ymm2, zmm3, {sae} +// CHECK: encoding: [0x62,0xf5,0xfc,0x18,0x6c,0xd3] + vcvttpd2udqs ymm2, zmm3, {sae} + +// CHECK: vcvttpd2udqs ymm2 {k7}, zmm3 +// CHECK: encoding: [0x62,0xf5,0xfc,0x4f,0x6c,0xd3] + vcvttpd2udqs ymm2 {k7}, zmm3 + +// CHECK: vcvttpd2udqs ymm2 {k7} {z}, zmm3, {sae} +// CHECK: encoding: [0x62,0xf5,0xfc,0x9f,0x6c,0xd3] + vcvttpd2udqs ymm2 {k7} {z}, zmm3, {sae} + +// CHECK: vcvttpd2udqs xmm2, xmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0xfc,0x08,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttpd2udqs xmm2, xmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttpd2udqs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0xfc,0x0f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttpd2udqs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291] + +// CHECK: vcvttpd2udqs xmm2, qword ptr [eax]{1to2} +// CHECK: encoding: [0x62,0xf5,0xfc,0x18,0x6c,0x10] + vcvttpd2udqs xmm2, qword ptr [eax]{1to2} + +// CHECK: vcvttpd2udqs xmm2, xmmword ptr [2*ebp - 512] +// CHECK: encoding: [0x62,0xf5,0xfc,0x08,0x6c,0x14,0x6d,0x00,0xfe,0xff,0xff] + vcvttpd2udqs xmm2, xmmword ptr [2*ebp - 512] + +// CHECK: vcvttpd2udqs xmm2 {k7} {z}, xmmword ptr [ecx + 2032] +// CHECK: encoding: [0x62,0xf5,0xfc,0x8f,0x6c,0x51,0x7f] + vcvttpd2udqs xmm2 {k7} {z}, xmmword ptr [ecx + 2032] + +// CHECK: vcvttpd2udqs xmm2 {k7} {z}, qword ptr [edx - 1024]{1to2} +// CHECK: encoding: [0x62,0xf5,0xfc,0x9f,0x6c,0x52,0x80] + vcvttpd2udqs xmm2 {k7} {z}, qword ptr [edx - 1024]{1to2} + +// CHECK: vcvttpd2udqs xmm2, qword ptr [eax]{1to4} +// CHECK: encoding: [0x62,0xf5,0xfc,0x38,0x6c,0x10] + vcvttpd2udqs xmm2, qword ptr [eax]{1to4} + +// CHECK: vcvttpd2udqs xmm2, ymmword ptr [2*ebp - 1024] +// CHECK: encoding: [0x62,0xf5,0xfc,0x28,0x6c,0x14,0x6d,0x00,0xfc,0xff,0xff] + vcvttpd2udqs xmm2, ymmword ptr [2*ebp - 1024] + +// CHECK: vcvttpd2udqs xmm2 {k7} {z}, ymmword ptr [ecx + 4064] +// CHECK: encoding: [0x62,0xf5,0xfc,0xaf,0x6c,0x51,0x7f] + vcvttpd2udqs xmm2 {k7} {z}, ymmword ptr [ecx + 4064] + +// CHECK: vcvttpd2udqs xmm2 {k7} {z}, qword ptr [edx - 1024]{1to4} +// CHECK: encoding: [0x62,0xf5,0xfc,0xbf,0x6c,0x52,0x80] + vcvttpd2udqs xmm2 {k7} {z}, qword ptr [edx - 1024]{1to4} + +// CHECK: vcvttpd2udqs ymm2, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0xfc,0x48,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttpd2udqs ymm2, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttpd2udqs ymm2 {k7}, zmmword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0xfc,0x4f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttpd2udqs ymm2 {k7}, zmmword ptr [edi + 4*eax + 291] + +// CHECK: vcvttpd2udqs ymm2, qword ptr [eax]{1to8} +// CHECK: encoding: [0x62,0xf5,0xfc,0x58,0x6c,0x10] + vcvttpd2udqs ymm2, qword ptr [eax]{1to8} + +// CHECK: vcvttpd2udqs ymm2, zmmword ptr [2*ebp - 2048] +// CHECK: encoding: 
[0x62,0xf5,0xfc,0x48,0x6c,0x14,0x6d,0x00,0xf8,0xff,0xff] + vcvttpd2udqs ymm2, zmmword ptr [2*ebp - 2048] + +// CHECK: vcvttpd2udqs ymm2 {k7} {z}, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf5,0xfc,0xcf,0x6c,0x51,0x7f] + vcvttpd2udqs ymm2 {k7} {z}, zmmword ptr [ecx + 8128] + +// CHECK: vcvttpd2udqs ymm2 {k7} {z}, qword ptr [edx - 1024]{1to8} +// CHECK: encoding: [0x62,0xf5,0xfc,0xdf,0x6c,0x52,0x80] + vcvttpd2udqs ymm2 {k7} {z}, qword ptr [edx - 1024]{1to8} + +// CHECK: vcvttpd2uqqs xmm2, xmm3 +// CHECK: encoding: [0x62,0xf5,0xfd,0x08,0x6c,0xd3] + vcvttpd2uqqs xmm2, xmm3 + +// CHECK: vcvttpd2uqqs xmm2 {k7}, xmm3 +// CHECK: encoding: [0x62,0xf5,0xfd,0x0f,0x6c,0xd3] + vcvttpd2uqqs xmm2 {k7}, xmm3 + +// CHECK: vcvttpd2uqqs xmm2 {k7} {z}, xmm3 +// CHECK: encoding: [0x62,0xf5,0xfd,0x8f,0x6c,0xd3] + vcvttpd2uqqs xmm2 {k7} {z}, xmm3 + +// CHECK: vcvttpd2uqqs ymm2, ymm3 +// CHECK: encoding: [0x62,0xf5,0xfd,0x28,0x6c,0xd3] + vcvttpd2uqqs ymm2, ymm3 + +// CHECK: vcvttpd2uqqs ymm2, ymm3, {sae} +// CHECK: encoding: [0x62,0xf5,0xf9,0x18,0x6c,0xd3] + vcvttpd2uqqs ymm2, ymm3, {sae} + +// CHECK: vcvttpd2uqqs ymm2 {k7}, ymm3 +// CHECK: encoding: [0x62,0xf5,0xfd,0x2f,0x6c,0xd3] + vcvttpd2uqqs ymm2 {k7}, ymm3 + +// CHECK: vcvttpd2uqqs ymm2 {k7} {z}, ymm3, {sae} +// CHECK: encoding: [0x62,0xf5,0xf9,0x9f,0x6c,0xd3] + vcvttpd2uqqs ymm2 {k7} {z}, ymm3, {sae} + +// CHECK: vcvttpd2uqqs zmm2, zmm3 +// CHECK: encoding: [0x62,0xf5,0xfd,0x48,0x6c,0xd3] + vcvttpd2uqqs zmm2, zmm3 + +// CHECK: vcvttpd2uqqs zmm2, zmm3, {sae} +// CHECK: encoding: [0x62,0xf5,0xfd,0x18,0x6c,0xd3] + vcvttpd2uqqs zmm2, zmm3, {sae} + +// CHECK: vcvttpd2uqqs zmm2 {k7}, zmm3 +// CHECK: encoding: [0x62,0xf5,0xfd,0x4f,0x6c,0xd3] + vcvttpd2uqqs zmm2 {k7}, zmm3 + +// CHECK: vcvttpd2uqqs zmm2 {k7} {z}, zmm3, {sae} +// CHECK: encoding: [0x62,0xf5,0xfd,0x9f,0x6c,0xd3] + vcvttpd2uqqs zmm2 {k7} {z}, zmm3, {sae} + +// CHECK: vcvttpd2uqqs xmm2, xmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0xfd,0x08,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttpd2uqqs xmm2, xmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttpd2uqqs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0xfd,0x0f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttpd2uqqs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291] + +// CHECK: vcvttpd2uqqs xmm2, qword ptr [eax]{1to2} +// CHECK: encoding: [0x62,0xf5,0xfd,0x18,0x6c,0x10] + vcvttpd2uqqs xmm2, qword ptr [eax]{1to2} + +// CHECK: vcvttpd2uqqs xmm2, xmmword ptr [2*ebp - 512] +// CHECK: encoding: [0x62,0xf5,0xfd,0x08,0x6c,0x14,0x6d,0x00,0xfe,0xff,0xff] + vcvttpd2uqqs xmm2, xmmword ptr [2*ebp - 512] + +// CHECK: vcvttpd2uqqs xmm2 {k7} {z}, xmmword ptr [ecx + 2032] +// CHECK: encoding: [0x62,0xf5,0xfd,0x8f,0x6c,0x51,0x7f] + vcvttpd2uqqs xmm2 {k7} {z}, xmmword ptr [ecx + 2032] + +// CHECK: vcvttpd2uqqs xmm2 {k7} {z}, qword ptr [edx - 1024]{1to2} +// CHECK: encoding: [0x62,0xf5,0xfd,0x9f,0x6c,0x52,0x80] + vcvttpd2uqqs xmm2 {k7} {z}, qword ptr [edx - 1024]{1to2} + +// CHECK: vcvttpd2uqqs ymm2, ymmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0xfd,0x28,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttpd2uqqs ymm2, ymmword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttpd2uqqs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0xfd,0x2f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttpd2uqqs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291] + +// CHECK: vcvttpd2uqqs ymm2, qword ptr [eax]{1to4} +// CHECK: encoding: [0x62,0xf5,0xfd,0x38,0x6c,0x10] + vcvttpd2uqqs ymm2, qword ptr 
[eax]{1to4} + +// CHECK: vcvttpd2uqqs ymm2, ymmword ptr [2*ebp - 1024] +// CHECK: encoding: [0x62,0xf5,0xfd,0x28,0x6c,0x14,0x6d,0x00,0xfc,0xff,0xff] + vcvttpd2uqqs ymm2, ymmword ptr [2*ebp - 1024] + +// CHECK: vcvttpd2uqqs ymm2 {k7} {z}, ymmword ptr [ecx + 4064] +// CHECK: encoding: [0x62,0xf5,0xfd,0xaf,0x6c,0x51,0x7f] + vcvttpd2uqqs ymm2 {k7} {z}, ymmword ptr [ecx + 4064] + +// CHECK: vcvttpd2uqqs ymm2 {k7} {z}, qword ptr [edx - 1024]{1to4} +// CHECK: encoding: [0x62,0xf5,0xfd,0xbf,0x6c,0x52,0x80] + vcvttpd2uqqs ymm2 {k7} {z}, qword ptr [edx - 1024]{1to4} + +// CHECK: vcvttpd2uqqs zmm2, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0xfd,0x48,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttpd2uqqs zmm2, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttpd2uqqs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0xfd,0x4f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttpd2uqqs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291] + +// CHECK: vcvttpd2uqqs zmm2, qword ptr [eax]{1to8} +// CHECK: encoding: [0x62,0xf5,0xfd,0x58,0x6c,0x10] + vcvttpd2uqqs zmm2, qword ptr [eax]{1to8} + +// CHECK: vcvttpd2uqqs zmm2, zmmword ptr [2*ebp - 2048] +// CHECK: encoding: [0x62,0xf5,0xfd,0x48,0x6c,0x14,0x6d,0x00,0xf8,0xff,0xff] + vcvttpd2uqqs zmm2, zmmword ptr [2*ebp - 2048] + +// CHECK: vcvttpd2uqqs zmm2 {k7} {z}, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf5,0xfd,0xcf,0x6c,0x51,0x7f] + vcvttpd2uqqs zmm2 {k7} {z}, zmmword ptr [ecx + 8128] + +// CHECK: vcvttpd2uqqs zmm2 {k7} {z}, qword ptr [edx - 1024]{1to8} +// CHECK: encoding: [0x62,0xf5,0xfd,0xdf,0x6c,0x52,0x80] + vcvttpd2uqqs zmm2 {k7} {z}, qword ptr [edx - 1024]{1to8} + +// CHECK: vcvttps2dqs xmm2, xmm3 +// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6d,0xd3] + vcvttps2dqs xmm2, xmm3 + +// CHECK: vcvttps2dqs xmm2 {k7}, xmm3 +// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x6d,0xd3] + vcvttps2dqs xmm2 {k7}, xmm3 + +// CHECK: vcvttps2dqs xmm2 {k7} {z}, xmm3 +// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x6d,0xd3] + vcvttps2dqs xmm2 {k7} {z}, xmm3 + +// CHECK: vcvttps2dqs ymm2, ymm3 +// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6d,0xd3] + vcvttps2dqs ymm2, ymm3 + +// CHECK: vcvttps2dqs ymm2, ymm3, {sae} +// CHECK: encoding: [0x62,0xf5,0x78,0x18,0x6d,0xd3] + vcvttps2dqs ymm2, ymm3, {sae} + +// CHECK: vcvttps2dqs ymm2 {k7}, ymm3 +// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x6d,0xd3] + vcvttps2dqs ymm2 {k7}, ymm3 + +// CHECK: vcvttps2dqs ymm2 {k7} {z}, ymm3, {sae} +// CHECK: encoding: [0x62,0xf5,0x78,0x9f,0x6d,0xd3] + vcvttps2dqs ymm2 {k7} {z}, ymm3, {sae} + +// CHECK: vcvttps2dqs zmm2, zmm3 +// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6d,0xd3] + vcvttps2dqs zmm2, zmm3 + +// CHECK: vcvttps2dqs zmm2, zmm3, {sae} +// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x6d,0xd3] + vcvttps2dqs zmm2, zmm3, {sae} + +// CHECK: vcvttps2dqs zmm2 {k7}, zmm3 +// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x6d,0xd3] + vcvttps2dqs zmm2 {k7}, zmm3 + +// CHECK: vcvttps2dqs zmm2 {k7} {z}, zmm3, {sae} +// CHECK: encoding: [0x62,0xf5,0x7c,0x9f,0x6d,0xd3] + vcvttps2dqs zmm2 {k7} {z}, zmm3, {sae} + +// CHECK: vcvttps2dqs xmm2, xmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2dqs xmm2, xmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttps2dqs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2dqs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291] + +// CHECK: vcvttps2dqs xmm2, dword ptr [eax]{1to4} +// CHECK: encoding: 
[0x62,0xf5,0x7c,0x18,0x6d,0x10] + vcvttps2dqs xmm2, dword ptr [eax]{1to4} + +// CHECK: vcvttps2dqs xmm2, xmmword ptr [2*ebp - 512] +// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6d,0x14,0x6d,0x00,0xfe,0xff,0xff] + vcvttps2dqs xmm2, xmmword ptr [2*ebp - 512] + +// CHECK: vcvttps2dqs xmm2 {k7} {z}, xmmword ptr [ecx + 2032] +// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x6d,0x51,0x7f] + vcvttps2dqs xmm2 {k7} {z}, xmmword ptr [ecx + 2032] + +// CHECK: vcvttps2dqs xmm2 {k7} {z}, dword ptr [edx - 512]{1to4} +// CHECK: encoding: [0x62,0xf5,0x7c,0x9f,0x6d,0x52,0x80] + vcvttps2dqs xmm2 {k7} {z}, dword ptr [edx - 512]{1to4} + +// CHECK: vcvttps2dqs ymm2, ymmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2dqs ymm2, ymmword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttps2dqs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2dqs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291] + +// CHECK: vcvttps2dqs ymm2, dword ptr [eax]{1to8} +// CHECK: encoding: [0x62,0xf5,0x7c,0x38,0x6d,0x10] + vcvttps2dqs ymm2, dword ptr [eax]{1to8} + +// CHECK: vcvttps2dqs ymm2, ymmword ptr [2*ebp - 1024] +// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6d,0x14,0x6d,0x00,0xfc,0xff,0xff] + vcvttps2dqs ymm2, ymmword ptr [2*ebp - 1024] + +// CHECK: vcvttps2dqs ymm2 {k7} {z}, ymmword ptr [ecx + 4064] +// CHECK: encoding: [0x62,0xf5,0x7c,0xaf,0x6d,0x51,0x7f] + vcvttps2dqs ymm2 {k7} {z}, ymmword ptr [ecx + 4064] + +// CHECK: vcvttps2dqs ymm2 {k7} {z}, dword ptr [edx - 512]{1to8} +// CHECK: encoding: [0x62,0xf5,0x7c,0xbf,0x6d,0x52,0x80] + vcvttps2dqs ymm2 {k7} {z}, dword ptr [edx - 512]{1to8} + +// CHECK: vcvttps2dqs zmm2, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2dqs zmm2, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttps2dqs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2dqs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291] + +// CHECK: vcvttps2dqs zmm2, dword ptr [eax]{1to16} +// CHECK: encoding: [0x62,0xf5,0x7c,0x58,0x6d,0x10] + vcvttps2dqs zmm2, dword ptr [eax]{1to16} + +// CHECK: vcvttps2dqs zmm2, zmmword ptr [2*ebp - 2048] +// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6d,0x14,0x6d,0x00,0xf8,0xff,0xff] + vcvttps2dqs zmm2, zmmword ptr [2*ebp - 2048] + +// CHECK: vcvttps2dqs zmm2 {k7} {z}, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf5,0x7c,0xcf,0x6d,0x51,0x7f] + vcvttps2dqs zmm2 {k7} {z}, zmmword ptr [ecx + 8128] + +// CHECK: vcvttps2dqs zmm2 {k7} {z}, dword ptr [edx - 512]{1to16} +// CHECK: encoding: [0x62,0xf5,0x7c,0xdf,0x6d,0x52,0x80] + vcvttps2dqs zmm2 {k7} {z}, dword ptr [edx - 512]{1to16} + +// CHECK: vcvttps2qqs xmm2, xmm3 +// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6d,0xd3] + vcvttps2qqs xmm2, xmm3 + +// CHECK: vcvttps2qqs xmm2 {k7}, xmm3 +// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x6d,0xd3] + vcvttps2qqs xmm2 {k7}, xmm3 + +// CHECK: vcvttps2qqs xmm2 {k7} {z}, xmm3 +// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x6d,0xd3] + vcvttps2qqs xmm2 {k7} {z}, xmm3 + +// CHECK: vcvttps2qqs ymm2, xmm3 +// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6d,0xd3] + vcvttps2qqs ymm2, xmm3 + +// CHECK: vcvttps2qqs ymm2, xmm3, {sae} +// CHECK: encoding: [0x62,0xf5,0x79,0x18,0x6d,0xd3] + vcvttps2qqs ymm2, xmm3, {sae} + +// CHECK: vcvttps2qqs ymm2 {k7}, xmm3 +// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x6d,0xd3] + vcvttps2qqs ymm2 {k7}, xmm3 + 
+// CHECK: vcvttps2qqs ymm2 {k7} {z}, xmm3, {sae} +// CHECK: encoding: [0x62,0xf5,0x79,0x9f,0x6d,0xd3] + vcvttps2qqs ymm2 {k7} {z}, xmm3, {sae} + +// CHECK: vcvttps2qqs zmm2, ymm3 +// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6d,0xd3] + vcvttps2qqs zmm2, ymm3 + +// CHECK: vcvttps2qqs zmm2, ymm3, {sae} +// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x6d,0xd3] + vcvttps2qqs zmm2, ymm3, {sae} + +// CHECK: vcvttps2qqs zmm2 {k7}, ymm3 +// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x6d,0xd3] + vcvttps2qqs zmm2 {k7}, ymm3 + +// CHECK: vcvttps2qqs zmm2 {k7} {z}, ymm3, {sae} +// CHECK: encoding: [0x62,0xf5,0x7d,0x9f,0x6d,0xd3] + vcvttps2qqs zmm2 {k7} {z}, ymm3, {sae} + +// CHECK: vcvttps2qqs xmm2, qword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2qqs xmm2, qword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttps2qqs xmm2 {k7}, qword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2qqs xmm2 {k7}, qword ptr [edi + 4*eax + 291] + +// CHECK: vcvttps2qqs xmm2, dword ptr [eax]{1to2} +// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x6d,0x10] + vcvttps2qqs xmm2, dword ptr [eax]{1to2} + +// CHECK: vcvttps2qqs xmm2, qword ptr [2*ebp - 256] +// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6d,0x14,0x6d,0x00,0xff,0xff,0xff] + vcvttps2qqs xmm2, qword ptr [2*ebp - 256] + +// CHECK: vcvttps2qqs xmm2 {k7} {z}, qword ptr [ecx + 1016] +// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x6d,0x51,0x7f] + vcvttps2qqs xmm2 {k7} {z}, qword ptr [ecx + 1016] + +// CHECK: vcvttps2qqs xmm2 {k7} {z}, dword ptr [edx - 512]{1to2} +// CHECK: encoding: [0x62,0xf5,0x7d,0x9f,0x6d,0x52,0x80] + vcvttps2qqs xmm2 {k7} {z}, dword ptr [edx - 512]{1to2} + +// CHECK: vcvttps2qqs ymm2, xmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2qqs ymm2, xmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttps2qqs ymm2 {k7}, xmmword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2qqs ymm2 {k7}, xmmword ptr [edi + 4*eax + 291] + +// CHECK: vcvttps2qqs ymm2, dword ptr [eax]{1to4} +// CHECK: encoding: [0x62,0xf5,0x7d,0x38,0x6d,0x10] + vcvttps2qqs ymm2, dword ptr [eax]{1to4} + +// CHECK: vcvttps2qqs ymm2, xmmword ptr [2*ebp - 512] +// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6d,0x14,0x6d,0x00,0xfe,0xff,0xff] + vcvttps2qqs ymm2, xmmword ptr [2*ebp - 512] + +// CHECK: vcvttps2qqs ymm2 {k7} {z}, xmmword ptr [ecx + 2032] +// CHECK: encoding: [0x62,0xf5,0x7d,0xaf,0x6d,0x51,0x7f] + vcvttps2qqs ymm2 {k7} {z}, xmmword ptr [ecx + 2032] + +// CHECK: vcvttps2qqs ymm2 {k7} {z}, dword ptr [edx - 512]{1to4} +// CHECK: encoding: [0x62,0xf5,0x7d,0xbf,0x6d,0x52,0x80] + vcvttps2qqs ymm2 {k7} {z}, dword ptr [edx - 512]{1to4} + +// CHECK: vcvttps2qqs zmm2, ymmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6d,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2qqs zmm2, ymmword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttps2qqs zmm2 {k7}, ymmword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x6d,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2qqs zmm2 {k7}, ymmword ptr [edi + 4*eax + 291] + +// CHECK: vcvttps2qqs zmm2, dword ptr [eax]{1to8} +// CHECK: encoding: [0x62,0xf5,0x7d,0x58,0x6d,0x10] + vcvttps2qqs zmm2, dword ptr [eax]{1to8} + +// CHECK: vcvttps2qqs zmm2, ymmword ptr [2*ebp - 1024] +// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6d,0x14,0x6d,0x00,0xfc,0xff,0xff] + vcvttps2qqs zmm2, ymmword 
ptr [2*ebp - 1024] + +// CHECK: vcvttps2qqs zmm2 {k7} {z}, ymmword ptr [ecx + 4064] +// CHECK: encoding: [0x62,0xf5,0x7d,0xcf,0x6d,0x51,0x7f] + vcvttps2qqs zmm2 {k7} {z}, ymmword ptr [ecx + 4064] + +// CHECK: vcvttps2qqs zmm2 {k7} {z}, dword ptr [edx - 512]{1to8} +// CHECK: encoding: [0x62,0xf5,0x7d,0xdf,0x6d,0x52,0x80] + vcvttps2qqs zmm2 {k7} {z}, dword ptr [edx - 512]{1to8} + +// CHECK: vcvttps2udqs xmm2, xmm3 +// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6c,0xd3] + vcvttps2udqs xmm2, xmm3 + +// CHECK: vcvttps2udqs xmm2 {k7}, xmm3 +// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x6c,0xd3] + vcvttps2udqs xmm2 {k7}, xmm3 + +// CHECK: vcvttps2udqs xmm2 {k7} {z}, xmm3 +// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x6c,0xd3] + vcvttps2udqs xmm2 {k7} {z}, xmm3 + +// CHECK: vcvttps2udqs ymm2, ymm3 +// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6c,0xd3] + vcvttps2udqs ymm2, ymm3 + +// CHECK: vcvttps2udqs ymm2, ymm3, {sae} +// CHECK: encoding: [0x62,0xf5,0x78,0x18,0x6c,0xd3] + vcvttps2udqs ymm2, ymm3, {sae} + +// CHECK: vcvttps2udqs ymm2 {k7}, ymm3 +// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x6c,0xd3] + vcvttps2udqs ymm2 {k7}, ymm3 + +// CHECK: vcvttps2udqs ymm2 {k7} {z}, ymm3, {sae} +// CHECK: encoding: [0x62,0xf5,0x78,0x9f,0x6c,0xd3] + vcvttps2udqs ymm2 {k7} {z}, ymm3, {sae} + +// CHECK: vcvttps2udqs zmm2, zmm3 +// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6c,0xd3] + vcvttps2udqs zmm2, zmm3 + +// CHECK: vcvttps2udqs zmm2, zmm3, {sae} +// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x6c,0xd3] + vcvttps2udqs zmm2, zmm3, {sae} + +// CHECK: vcvttps2udqs zmm2 {k7}, zmm3 +// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x6c,0xd3] + vcvttps2udqs zmm2 {k7}, zmm3 + +// CHECK: vcvttps2udqs zmm2 {k7} {z}, zmm3, {sae} +// CHECK: encoding: [0x62,0xf5,0x7c,0x9f,0x6c,0xd3] + vcvttps2udqs zmm2 {k7} {z}, zmm3, {sae} + +// CHECK: vcvttps2udqs xmm2, xmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2udqs xmm2, xmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttps2udqs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0x7c,0x0f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2udqs xmm2 {k7}, xmmword ptr [edi + 4*eax + 291] + +// CHECK: vcvttps2udqs xmm2, dword ptr [eax]{1to4} +// CHECK: encoding: [0x62,0xf5,0x7c,0x18,0x6c,0x10] + vcvttps2udqs xmm2, dword ptr [eax]{1to4} + +// CHECK: vcvttps2udqs xmm2, xmmword ptr [2*ebp - 512] +// CHECK: encoding: [0x62,0xf5,0x7c,0x08,0x6c,0x14,0x6d,0x00,0xfe,0xff,0xff] + vcvttps2udqs xmm2, xmmword ptr [2*ebp - 512] + +// CHECK: vcvttps2udqs xmm2 {k7} {z}, xmmword ptr [ecx + 2032] +// CHECK: encoding: [0x62,0xf5,0x7c,0x8f,0x6c,0x51,0x7f] + vcvttps2udqs xmm2 {k7} {z}, xmmword ptr [ecx + 2032] + +// CHECK: vcvttps2udqs xmm2 {k7} {z}, dword ptr [edx - 512]{1to4} +// CHECK: encoding: [0x62,0xf5,0x7c,0x9f,0x6c,0x52,0x80] + vcvttps2udqs xmm2 {k7} {z}, dword ptr [edx - 512]{1to4} + +// CHECK: vcvttps2udqs ymm2, ymmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0x7c,0x28,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2udqs ymm2, ymmword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttps2udqs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0x7c,0x2f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2udqs ymm2 {k7}, ymmword ptr [edi + 4*eax + 291] + +// CHECK: vcvttps2udqs ymm2, dword ptr [eax]{1to8} +// CHECK: encoding: [0x62,0xf5,0x7c,0x38,0x6c,0x10] + vcvttps2udqs ymm2, dword ptr [eax]{1to8} + +// CHECK: vcvttps2udqs ymm2, ymmword ptr [2*ebp - 1024] +// CHECK: encoding: 
[0x62,0xf5,0x7c,0x28,0x6c,0x14,0x6d,0x00,0xfc,0xff,0xff] + vcvttps2udqs ymm2, ymmword ptr [2*ebp - 1024] + +// CHECK: vcvttps2udqs ymm2 {k7} {z}, ymmword ptr [ecx + 4064] +// CHECK: encoding: [0x62,0xf5,0x7c,0xaf,0x6c,0x51,0x7f] + vcvttps2udqs ymm2 {k7} {z}, ymmword ptr [ecx + 4064] + +// CHECK: vcvttps2udqs ymm2 {k7} {z}, dword ptr [edx - 512]{1to8} +// CHECK: encoding: [0x62,0xf5,0x7c,0xbf,0x6c,0x52,0x80] + vcvttps2udqs ymm2 {k7} {z}, dword ptr [edx - 512]{1to8} + +// CHECK: vcvttps2udqs zmm2, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2udqs zmm2, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttps2udqs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0x7c,0x4f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2udqs zmm2 {k7}, zmmword ptr [edi + 4*eax + 291] + +// CHECK: vcvttps2udqs zmm2, dword ptr [eax]{1to16} +// CHECK: encoding: [0x62,0xf5,0x7c,0x58,0x6c,0x10] + vcvttps2udqs zmm2, dword ptr [eax]{1to16} + +// CHECK: vcvttps2udqs zmm2, zmmword ptr [2*ebp - 2048] +// CHECK: encoding: [0x62,0xf5,0x7c,0x48,0x6c,0x14,0x6d,0x00,0xf8,0xff,0xff] + vcvttps2udqs zmm2, zmmword ptr [2*ebp - 2048] + +// CHECK: vcvttps2udqs zmm2 {k7} {z}, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf5,0x7c,0xcf,0x6c,0x51,0x7f] + vcvttps2udqs zmm2 {k7} {z}, zmmword ptr [ecx + 8128] + +// CHECK: vcvttps2udqs zmm2 {k7} {z}, dword ptr [edx - 512]{1to16} +// CHECK: encoding: [0x62,0xf5,0x7c,0xdf,0x6c,0x52,0x80] + vcvttps2udqs zmm2 {k7} {z}, dword ptr [edx - 512]{1to16} + +// CHECK: vcvttps2uqqs xmm2, xmm3 +// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6c,0xd3] + vcvttps2uqqs xmm2, xmm3 + +// CHECK: vcvttps2uqqs xmm2 {k7}, xmm3 +// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x6c,0xd3] + vcvttps2uqqs xmm2 {k7}, xmm3 + +// CHECK: vcvttps2uqqs xmm2 {k7} {z}, xmm3 +// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x6c,0xd3] + vcvttps2uqqs xmm2 {k7} {z}, xmm3 + +// CHECK: vcvttps2uqqs ymm2, xmm3 +// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6c,0xd3] + vcvttps2uqqs ymm2, xmm3 + +// CHECK: vcvttps2uqqs ymm2, xmm3, {sae} +// CHECK: encoding: [0x62,0xf5,0x79,0x18,0x6c,0xd3] + vcvttps2uqqs ymm2, xmm3, {sae} + +// CHECK: vcvttps2uqqs ymm2 {k7}, xmm3 +// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x6c,0xd3] + vcvttps2uqqs ymm2 {k7}, xmm3 + +// CHECK: vcvttps2uqqs ymm2 {k7} {z}, xmm3, {sae} +// CHECK: encoding: [0x62,0xf5,0x79,0x9f,0x6c,0xd3] + vcvttps2uqqs ymm2 {k7} {z}, xmm3, {sae} + +// CHECK: vcvttps2uqqs zmm2, ymm3 +// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6c,0xd3] + vcvttps2uqqs zmm2, ymm3 + +// CHECK: vcvttps2uqqs zmm2, ymm3, {sae} +// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x6c,0xd3] + vcvttps2uqqs zmm2, ymm3, {sae} + +// CHECK: vcvttps2uqqs zmm2 {k7}, ymm3 +// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x6c,0xd3] + vcvttps2uqqs zmm2 {k7}, ymm3 + +// CHECK: vcvttps2uqqs zmm2 {k7} {z}, ymm3, {sae} +// CHECK: encoding: [0x62,0xf5,0x7d,0x9f,0x6c,0xd3] + vcvttps2uqqs zmm2 {k7} {z}, ymm3, {sae} + +// CHECK: vcvttps2uqqs xmm2, qword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2uqqs xmm2, qword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttps2uqqs xmm2 {k7}, qword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0x7d,0x0f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2uqqs xmm2 {k7}, qword ptr [edi + 4*eax + 291] + +// CHECK: vcvttps2uqqs xmm2, dword ptr [eax]{1to2} +// CHECK: encoding: [0x62,0xf5,0x7d,0x18,0x6c,0x10] + vcvttps2uqqs xmm2, dword ptr [eax]{1to2} 
+ +// CHECK: vcvttps2uqqs xmm2, qword ptr [2*ebp - 256] +// CHECK: encoding: [0x62,0xf5,0x7d,0x08,0x6c,0x14,0x6d,0x00,0xff,0xff,0xff] + vcvttps2uqqs xmm2, qword ptr [2*ebp - 256] + +// CHECK: vcvttps2uqqs xmm2 {k7} {z}, qword ptr [ecx + 1016] +// CHECK: encoding: [0x62,0xf5,0x7d,0x8f,0x6c,0x51,0x7f] + vcvttps2uqqs xmm2 {k7} {z}, qword ptr [ecx + 1016] + +// CHECK: vcvttps2uqqs xmm2 {k7} {z}, dword ptr [edx - 512]{1to2} +// CHECK: encoding: [0x62,0xf5,0x7d,0x9f,0x6c,0x52,0x80] + vcvttps2uqqs xmm2 {k7} {z}, dword ptr [edx - 512]{1to2} + +// CHECK: vcvttps2uqqs ymm2, xmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2uqqs ymm2, xmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttps2uqqs ymm2 {k7}, xmmword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0x7d,0x2f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2uqqs ymm2 {k7}, xmmword ptr [edi + 4*eax + 291] + +// CHECK: vcvttps2uqqs ymm2, dword ptr [eax]{1to4} +// CHECK: encoding: [0x62,0xf5,0x7d,0x38,0x6c,0x10] + vcvttps2uqqs ymm2, dword ptr [eax]{1to4} + +// CHECK: vcvttps2uqqs ymm2, xmmword ptr [2*ebp - 512] +// CHECK: encoding: [0x62,0xf5,0x7d,0x28,0x6c,0x14,0x6d,0x00,0xfe,0xff,0xff] + vcvttps2uqqs ymm2, xmmword ptr [2*ebp - 512] + +// CHECK: vcvttps2uqqs ymm2 {k7} {z}, xmmword ptr [ecx + 2032] +// CHECK: encoding: [0x62,0xf5,0x7d,0xaf,0x6c,0x51,0x7f] + vcvttps2uqqs ymm2 {k7} {z}, xmmword ptr [ecx + 2032] + +// CHECK: vcvttps2uqqs ymm2 {k7} {z}, dword ptr [edx - 512]{1to4} +// CHECK: encoding: [0x62,0xf5,0x7d,0xbf,0x6c,0x52,0x80] + vcvttps2uqqs ymm2 {k7} {z}, dword ptr [edx - 512]{1to4} + +// CHECK: vcvttps2uqqs zmm2, ymmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6c,0x94,0xf4,0x00,0x00,0x00,0x10] + vcvttps2uqqs zmm2, ymmword ptr [esp + 8*esi + 268435456] + +// CHECK: vcvttps2uqqs zmm2 {k7}, ymmword ptr [edi + 4*eax + 291] +// CHECK: encoding: [0x62,0xf5,0x7d,0x4f,0x6c,0x94,0x87,0x23,0x01,0x00,0x00] + vcvttps2uqqs zmm2 {k7}, ymmword ptr [edi + 4*eax + 291] + +// CHECK: vcvttps2uqqs zmm2, dword ptr [eax]{1to8} +// CHECK: encoding: [0x62,0xf5,0x7d,0x58,0x6c,0x10] + vcvttps2uqqs zmm2, dword ptr [eax]{1to8} + +// CHECK: vcvttps2uqqs zmm2, ymmword ptr [2*ebp - 1024] +// CHECK: encoding: [0x62,0xf5,0x7d,0x48,0x6c,0x14,0x6d,0x00,0xfc,0xff,0xff] + vcvttps2uqqs zmm2, ymmword ptr [2*ebp - 1024] + +// CHECK: vcvttps2uqqs zmm2 {k7} {z}, ymmword ptr [ecx + 4064] +// CHECK: encoding: [0x62,0xf5,0x7d,0xcf,0x6c,0x51,0x7f] + vcvttps2uqqs zmm2 {k7} {z}, ymmword ptr [ecx + 4064] + +// CHECK: vcvttps2uqqs zmm2 {k7} {z}, dword ptr [edx - 512]{1to8} +// CHECK: encoding: [0x62,0xf5,0x7d,0xdf,0x6c,0x52,0x80] + vcvttps2uqqs zmm2 {k7} {z}, dword ptr [edx - 512]{1to8} + diff --git a/llvm/test/MC/X86/avx10_2satcvtds-64-att.s b/llvm/test/MC/X86/avx10_2satcvtds-64-att.s new file mode 100644 index 0000000..c653bf5 --- /dev/null +++ b/llvm/test/MC/X86/avx10_2satcvtds-64-att.s @@ -0,0 +1,1170 @@ +// RUN: llvm-mc -triple x86_64 --show-encoding %s | FileCheck %s + +// CHECK: vcvttsd2sis %xmm22, %ecx +// CHECK: encoding: [0x62,0xb5,0x7f,0x08,0x6d,0xce] + vcvttsd2sis %xmm22, %ecx + +// CHECK: vcvttsd2sis {sae}, %xmm22, %ecx +// CHECK: encoding: [0x62,0xb5,0x7f,0x18,0x6d,0xce] + vcvttsd2sis {sae}, %xmm22, %ecx + +// CHECK: vcvttsd2sis %xmm22, %r9 +// CHECK: encoding: [0x62,0x35,0xff,0x08,0x6d,0xce] + vcvttsd2sis %xmm22, %r9 + +// CHECK: vcvttsd2sis {sae}, %xmm22, %r9 +// CHECK: encoding: [0x62,0x35,0xff,0x18,0x6d,0xce] + vcvttsd2sis {sae}, %xmm22, %r9 + +// 
CHECK: vcvttsd2sis 268435456(%rbp,%r14,8), %ecx +// CHECK: encoding: [0x62,0xb5,0x7f,0x08,0x6d,0x8c,0xf5,0x00,0x00,0x00,0x10] + vcvttsd2sis 268435456(%rbp,%r14,8), %ecx + +// CHECK: vcvttsd2sis 291(%r8,%rax,4), %ecx +// CHECK: encoding: [0x62,0xd5,0x7f,0x08,0x6d,0x8c,0x80,0x23,0x01,0x00,0x00] + vcvttsd2sis 291(%r8,%rax,4), %ecx + +// CHECK: vcvttsd2sis (%rip), %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0x0d,0x00,0x00,0x00,0x00] + vcvttsd2sis (%rip), %ecx + +// CHECK: vcvttsd2sis -256(,%rbp,2), %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0x0c,0x6d,0x00,0xff,0xff,0xff] + vcvttsd2sis -256(,%rbp,2), %ecx + +// CHECK: vcvttsd2sis 1016(%rcx), %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0x49,0x7f] + vcvttsd2sis 1016(%rcx), %ecx + +// CHECK: vcvttsd2sis -1024(%rdx), %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0x4a,0x80] + vcvttsd2sis -1024(%rdx), %ecx + +// CHECK: vcvttsd2sis 268435456(%rbp,%r14,8), %r9 +// CHECK: encoding: [0x62,0x35,0xff,0x08,0x6d,0x8c,0xf5,0x00,0x00,0x00,0x10] + vcvttsd2sis 268435456(%rbp,%r14,8), %r9 + +// CHECK: vcvttsd2sis 291(%r8,%rax,4), %r9 +// CHECK: encoding: [0x62,0x55,0xff,0x08,0x6d,0x8c,0x80,0x23,0x01,0x00,0x00] + vcvttsd2sis 291(%r8,%rax,4), %r9 + +// CHECK: vcvttsd2sis (%rip), %r9 +// CHECK: encoding: [0x62,0x75,0xff,0x08,0x6d,0x0d,0x00,0x00,0x00,0x00] + vcvttsd2sis (%rip), %r9 + +// CHECK: vcvttsd2sis -256(,%rbp,2), %r9 +// CHECK: encoding: [0x62,0x75,0xff,0x08,0x6d,0x0c,0x6d,0x00,0xff,0xff,0xff] + vcvttsd2sis -256(,%rbp,2), %r9 + +// CHECK: vcvttsd2sis 1016(%rcx), %r9 +// CHECK: encoding: [0x62,0x75,0xff,0x08,0x6d,0x49,0x7f] + vcvttsd2sis 1016(%rcx), %r9 + +// CHECK: vcvttsd2sis -1024(%rdx), %r9 +// CHECK: encoding: [0x62,0x75,0xff,0x08,0x6d,0x4a,0x80] + vcvttsd2sis -1024(%rdx), %r9 + +// CHECK: vcvttsd2usis %xmm22, %ecx +// CHECK: encoding: [0x62,0xb5,0x7f,0x08,0x6c,0xce] + vcvttsd2usis %xmm22, %ecx + +// CHECK: vcvttsd2usis {sae}, %xmm22, %ecx +// CHECK: encoding: [0x62,0xb5,0x7f,0x18,0x6c,0xce] + vcvttsd2usis {sae}, %xmm22, %ecx + +// CHECK: vcvttsd2usis %xmm22, %r9 +// CHECK: encoding: [0x62,0x35,0xff,0x08,0x6c,0xce] + vcvttsd2usis %xmm22, %r9 + +// CHECK: vcvttsd2usis {sae}, %xmm22, %r9 +// CHECK: encoding: [0x62,0x35,0xff,0x18,0x6c,0xce] + vcvttsd2usis {sae}, %xmm22, %r9 + +// CHECK: vcvttsd2usis 268435456(%rbp,%r14,8), %ecx +// CHECK: encoding: [0x62,0xb5,0x7f,0x08,0x6c,0x8c,0xf5,0x00,0x00,0x00,0x10] + vcvttsd2usis 268435456(%rbp,%r14,8), %ecx + +// CHECK: vcvttsd2usis 291(%r8,%rax,4), %ecx +// CHECK: encoding: [0x62,0xd5,0x7f,0x08,0x6c,0x8c,0x80,0x23,0x01,0x00,0x00] + vcvttsd2usis 291(%r8,%rax,4), %ecx + +// CHECK: vcvttsd2usis (%rip), %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0x0d,0x00,0x00,0x00,0x00] + vcvttsd2usis (%rip), %ecx + +// CHECK: vcvttsd2usis -256(,%rbp,2), %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0x0c,0x6d,0x00,0xff,0xff,0xff] + vcvttsd2usis -256(,%rbp,2), %ecx + +// CHECK: vcvttsd2usis 1016(%rcx), %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0x49,0x7f] + vcvttsd2usis 1016(%rcx), %ecx + +// CHECK: vcvttsd2usis -1024(%rdx), %ecx +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0x4a,0x80] + vcvttsd2usis -1024(%rdx), %ecx + +// CHECK: vcvttsd2usis 268435456(%rbp,%r14,8), %r9 +// CHECK: encoding: [0x62,0x35,0xff,0x08,0x6c,0x8c,0xf5,0x00,0x00,0x00,0x10] + vcvttsd2usis 268435456(%rbp,%r14,8), %r9 + +// CHECK: vcvttsd2usis 291(%r8,%rax,4), %r9 +// CHECK: encoding: [0x62,0x55,0xff,0x08,0x6c,0x8c,0x80,0x23,0x01,0x00,0x00] + vcvttsd2usis 291(%r8,%rax,4), %r9 + +// CHECK: vcvttsd2usis (%rip), 
%r9 +// CHECK: encoding: [0x62,0x75,0xff,0x08,0x6c,0x0d,0x00,0x00,0x00,0x00] + vcvttsd2usis (%rip), %r9 + +// CHECK: vcvttsd2usis -256(,%rbp,2), %r9 +// CHECK: encoding: [0x62,0x75,0xff,0x08,0x6c,0x0c,0x6d,0x00,0xff,0xff,0xff] + vcvttsd2usis -256(,%rbp,2), %r9 + +// CHECK: vcvttsd2usis 1016(%rcx), %r9 +// CHECK: encoding: [0x62,0x75,0xff,0x08,0x6c,0x49,0x7f] + vcvttsd2usis 1016(%rcx), %r9 + +// CHECK: vcvttsd2usis -1024(%rdx), %r9 +// CHECK: encoding: [0x62,0x75,0xff,0x08,0x6c,0x4a,0x80] + vcvttsd2usis -1024(%rdx), %r9 + +// CHECK: vcvttss2sis %xmm22, %ecx +// CHECK: encoding: [0x62,0xb5,0x7e,0x08,0x6d,0xce] + vcvttss2sis %xmm22, %ecx + +// CHECK: vcvttss2sis {sae}, %xmm22, %ecx +// CHECK: encoding: [0x62,0xb5,0x7e,0x18,0x6d,0xce] + vcvttss2sis {sae}, %xmm22, %ecx + +// CHECK: vcvttss2sis %xmm22, %r9 +// CHECK: encoding: [0x62,0x35,0xfe,0x08,0x6d,0xce] + vcvttss2sis %xmm22, %r9 + +// CHECK: vcvttss2sis {sae}, %xmm22, %r9 +// CHECK: encoding: [0x62,0x35,0xfe,0x18,0x6d,0xce] + vcvttss2sis {sae}, %xmm22, %r9 + +// CHECK: vcvttss2sis 268435456(%rbp,%r14,8), %ecx +// CHECK: encoding: [0x62,0xb5,0x7e,0x08,0x6d,0x8c,0xf5,0x00,0x00,0x00,0x10] + vcvttss2sis 268435456(%rbp,%r14,8), %ecx + +// CHECK: vcvttss2sis 291(%r8,%rax,4), %ecx +// CHECK: encoding: [0x62,0xd5,0x7e,0x08,0x6d,0x8c,0x80,0x23,0x01,0x00,0x00] + vcvttss2sis 291(%r8,%rax,4), %ecx + +// CHECK: vcvttss2sis (%rip), %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x0d,0x00,0x00,0x00,0x00] + vcvttss2sis (%rip), %ecx + +// CHECK: vcvttss2sis -128(,%rbp,2), %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x0c,0x6d,0x80,0xff,0xff,0xff] + vcvttss2sis -128(,%rbp,2), %ecx + +// CHECK: vcvttss2sis 508(%rcx), %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x49,0x7f] + vcvttss2sis 508(%rcx), %ecx + +// CHECK: vcvttss2sis -512(%rdx), %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x4a,0x80] + vcvttss2sis -512(%rdx), %ecx + +// CHECK: vcvttss2sis 268435456(%rbp,%r14,8), %r9 +// CHECK: encoding: [0x62,0x35,0xfe,0x08,0x6d,0x8c,0xf5,0x00,0x00,0x00,0x10] + vcvttss2sis 268435456(%rbp,%r14,8), %r9 + +// CHECK: vcvttss2sis 291(%r8,%rax,4), %r9 +// CHECK: encoding: [0x62,0x55,0xfe,0x08,0x6d,0x8c,0x80,0x23,0x01,0x00,0x00] + vcvttss2sis 291(%r8,%rax,4), %r9 + +// CHECK: vcvttss2sis (%rip), %r9 +// CHECK: encoding: [0x62,0x75,0xfe,0x08,0x6d,0x0d,0x00,0x00,0x00,0x00] + vcvttss2sis (%rip), %r9 + +// CHECK: vcvttss2sis -128(,%rbp,2), %r9 +// CHECK: encoding: [0x62,0x75,0xfe,0x08,0x6d,0x0c,0x6d,0x80,0xff,0xff,0xff] + vcvttss2sis -128(,%rbp,2), %r9 + +// CHECK: vcvttss2sis 508(%rcx), %r9 +// CHECK: encoding: [0x62,0x75,0xfe,0x08,0x6d,0x49,0x7f] + vcvttss2sis 508(%rcx), %r9 + +// CHECK: vcvttss2sis -512(%rdx), %r9 +// CHECK: encoding: [0x62,0x75,0xfe,0x08,0x6d,0x4a,0x80] + vcvttss2sis -512(%rdx), %r9 + +// CHECK: vcvttss2usis %xmm22, %ecx +// CHECK: encoding: [0x62,0xb5,0x7e,0x08,0x6c,0xce] + vcvttss2usis %xmm22, %ecx + +// CHECK: vcvttss2usis {sae}, %xmm22, %ecx +// CHECK: encoding: [0x62,0xb5,0x7e,0x18,0x6c,0xce] + vcvttss2usis {sae}, %xmm22, %ecx + +// CHECK: vcvttss2usis %xmm22, %r9 +// CHECK: encoding: [0x62,0x35,0xfe,0x08,0x6c,0xce] + vcvttss2usis %xmm22, %r9 + +// CHECK: vcvttss2usis {sae}, %xmm22, %r9 +// CHECK: encoding: [0x62,0x35,0xfe,0x18,0x6c,0xce] + vcvttss2usis {sae}, %xmm22, %r9 + +// CHECK: vcvttss2usis 268435456(%rbp,%r14,8), %ecx +// CHECK: encoding: [0x62,0xb5,0x7e,0x08,0x6c,0x8c,0xf5,0x00,0x00,0x00,0x10] + vcvttss2usis 268435456(%rbp,%r14,8), %ecx + +// CHECK: vcvttss2usis 291(%r8,%rax,4), %ecx +// CHECK: encoding: 
[0x62,0xd5,0x7e,0x08,0x6c,0x8c,0x80,0x23,0x01,0x00,0x00] + vcvttss2usis 291(%r8,%rax,4), %ecx + +// CHECK: vcvttss2usis (%rip), %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0x0d,0x00,0x00,0x00,0x00] + vcvttss2usis (%rip), %ecx + +// CHECK: vcvttss2usis -128(,%rbp,2), %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0x0c,0x6d,0x80,0xff,0xff,0xff] + vcvttss2usis -128(,%rbp,2), %ecx + +// CHECK: vcvttss2usis 508(%rcx), %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0x49,0x7f] + vcvttss2usis 508(%rcx), %ecx + +// CHECK: vcvttss2usis -512(%rdx), %ecx +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0x4a,0x80] + vcvttss2usis -512(%rdx), %ecx + +// CHECK: vcvttss2usis 268435456(%rbp,%r14,8), %r9 +// CHECK: encoding: [0x62,0x35,0xfe,0x08,0x6c,0x8c,0xf5,0x00,0x00,0x00,0x10] + vcvttss2usis 268435456(%rbp,%r14,8), %r9 + +// CHECK: vcvttss2usis 291(%r8,%rax,4), %r9 +// CHECK: encoding: [0x62,0x55,0xfe,0x08,0x6c,0x8c,0x80,0x23,0x01,0x00,0x00] + vcvttss2usis 291(%r8,%rax,4), %r9 + +// CHECK: vcvttss2usis (%rip), %r9 +// CHECK: encoding: [0x62,0x75,0xfe,0x08,0x6c,0x0d,0x00,0x00,0x00,0x00] + vcvttss2usis (%rip), %r9 + +// CHECK: vcvttss2usis -128(,%rbp,2), %r9 +// CHECK: encoding: [0x62,0x75,0xfe,0x08,0x6c,0x0c,0x6d,0x80,0xff,0xff,0xff] + vcvttss2usis -128(,%rbp,2), %r9 + +// CHECK: vcvttss2usis 508(%rcx), %r9 +// CHECK: encoding: [0x62,0x75,0xfe,0x08,0x6c,0x49,0x7f] + vcvttss2usis 508(%rcx), %r9 + +// CHECK: vcvttss2usis -512(%rdx), %r9 +// CHECK: encoding: [0x62,0x75,0xfe,0x08,0x6c,0x4a,0x80] + vcvttss2usis -512(%rdx), %r9 + +// CHECK: vcvttpd2dqs %xmm23, %xmm22 +// CHECK: encoding: [0x62,0xa5,0xfc,0x08,0x6d,0xf7] + vcvttpd2dqs %xmm23, %xmm22 + +// CHECK: vcvttpd2dqs %xmm23, %xmm22 {%k7} +// CHECK: encoding: [0x62,0xa5,0xfc,0x0f,0x6d,0xf7] + vcvttpd2dqs %xmm23, %xmm22 {%k7} + +// CHECK: vcvttpd2dqs %xmm23, %xmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xa5,0xfc,0x8f,0x6d,0xf7] + vcvttpd2dqs %xmm23, %xmm22 {%k7} {z} + +// CHECK: vcvttpd2dqs %ymm23, %xmm22 +// CHECK: encoding: [0x62,0xa5,0xfc,0x28,0x6d,0xf7] + vcvttpd2dqs %ymm23, %xmm22 + +// CHECK: vcvttpd2dqs {sae}, %ymm23, %xmm22 +// CHECK: encoding: [0x62,0xa5,0xf8,0x18,0x6d,0xf7] + vcvttpd2dqs {sae}, %ymm23, %xmm22 + +// CHECK: vcvttpd2dqs %ymm23, %xmm22 {%k7} +// CHECK: encoding: [0x62,0xa5,0xfc,0x2f,0x6d,0xf7] + vcvttpd2dqs %ymm23, %xmm22 {%k7} + +// CHECK: vcvttpd2dqs {sae}, %ymm23, %xmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xa5,0xf8,0x9f,0x6d,0xf7] + vcvttpd2dqs {sae}, %ymm23, %xmm22 {%k7} {z} + +// CHECK: vcvttpd2dqs %zmm23, %ymm22 +// CHECK: encoding: [0x62,0xa5,0xfc,0x48,0x6d,0xf7] + vcvttpd2dqs %zmm23, %ymm22 + +// CHECK: vcvttpd2dqs {sae}, %zmm23, %ymm22 +// CHECK: encoding: [0x62,0xa5,0xfc,0x18,0x6d,0xf7] + vcvttpd2dqs {sae}, %zmm23, %ymm22 + +// CHECK: vcvttpd2dqs %zmm23, %ymm22 {%k7} +// CHECK: encoding: [0x62,0xa5,0xfc,0x4f,0x6d,0xf7] + vcvttpd2dqs %zmm23, %ymm22 {%k7} + +// CHECK: vcvttpd2dqs {sae}, %zmm23, %ymm22 {%k7} {z} +// CHECK: encoding: [0x62,0xa5,0xfc,0x9f,0x6d,0xf7] + vcvttpd2dqs {sae}, %zmm23, %ymm22 {%k7} {z} + +// CHECK: vcvttpd2dqsx 268435456(%rbp,%r14,8), %xmm22 +// CHECK: encoding: [0x62,0xa5,0xfc,0x08,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttpd2dqsx 268435456(%rbp,%r14,8), %xmm22 + +// CHECK: vcvttpd2dqsx 291(%r8,%rax,4), %xmm22 {%k7} +// CHECK: encoding: [0x62,0xc5,0xfc,0x0f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttpd2dqsx 291(%r8,%rax,4), %xmm22 {%k7} + +// CHECK: vcvttpd2dqs (%rip){1to2}, %xmm22 +// CHECK: encoding: [0x62,0xe5,0xfc,0x18,0x6d,0x35,0x00,0x00,0x00,0x00] + vcvttpd2dqs (%rip){1to2}, 
%xmm22 + +// CHECK: vcvttpd2dqsx -512(,%rbp,2), %xmm22 +// CHECK: encoding: [0x62,0xe5,0xfc,0x08,0x6d,0x34,0x6d,0x00,0xfe,0xff,0xff] + vcvttpd2dqsx -512(,%rbp,2), %xmm22 + +// CHECK: vcvttpd2dqsx 2032(%rcx), %xmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0xfc,0x8f,0x6d,0x71,0x7f] + vcvttpd2dqsx 2032(%rcx), %xmm22 {%k7} {z} + +// CHECK: vcvttpd2dqs -1024(%rdx){1to2}, %xmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0xfc,0x9f,0x6d,0x72,0x80] + vcvttpd2dqs -1024(%rdx){1to2}, %xmm22 {%k7} {z} + +// CHECK: vcvttpd2dqs (%rip){1to4}, %xmm22 +// CHECK: encoding: [0x62,0xe5,0xfc,0x38,0x6d,0x35,0x00,0x00,0x00,0x00] + vcvttpd2dqs (%rip){1to4}, %xmm22 + +// CHECK: vcvttpd2dqsy -1024(,%rbp,2), %xmm22 +// CHECK: encoding: [0x62,0xe5,0xfc,0x28,0x6d,0x34,0x6d,0x00,0xfc,0xff,0xff] + vcvttpd2dqsy -1024(,%rbp,2), %xmm22 + +// CHECK: vcvttpd2dqsy 4064(%rcx), %xmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0xfc,0xaf,0x6d,0x71,0x7f] + vcvttpd2dqsy 4064(%rcx), %xmm22 {%k7} {z} + +// CHECK: vcvttpd2dqs -1024(%rdx){1to4}, %xmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0xfc,0xbf,0x6d,0x72,0x80] + vcvttpd2dqs -1024(%rdx){1to4}, %xmm22 {%k7} {z} + +// CHECK: vcvttpd2dqs 268435456(%rbp,%r14,8), %ymm22 +// CHECK: encoding: [0x62,0xa5,0xfc,0x48,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttpd2dqs 268435456(%rbp,%r14,8), %ymm22 + +// CHECK: vcvttpd2dqs 291(%r8,%rax,4), %ymm22 {%k7} +// CHECK: encoding: [0x62,0xc5,0xfc,0x4f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttpd2dqs 291(%r8,%rax,4), %ymm22 {%k7} + +// CHECK: vcvttpd2dqs (%rip){1to8}, %ymm22 +// CHECK: encoding: [0x62,0xe5,0xfc,0x58,0x6d,0x35,0x00,0x00,0x00,0x00] + vcvttpd2dqs (%rip){1to8}, %ymm22 + +// CHECK: vcvttpd2dqs -2048(,%rbp,2), %ymm22 +// CHECK: encoding: [0x62,0xe5,0xfc,0x48,0x6d,0x34,0x6d,0x00,0xf8,0xff,0xff] + vcvttpd2dqs -2048(,%rbp,2), %ymm22 + +// CHECK: vcvttpd2dqs 8128(%rcx), %ymm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0xfc,0xcf,0x6d,0x71,0x7f] + vcvttpd2dqs 8128(%rcx), %ymm22 {%k7} {z} + +// CHECK: vcvttpd2dqs -1024(%rdx){1to8}, %ymm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0xfc,0xdf,0x6d,0x72,0x80] + vcvttpd2dqs -1024(%rdx){1to8}, %ymm22 {%k7} {z} + +// CHECK: vcvttpd2qqs %xmm23, %xmm22 +// CHECK: encoding: [0x62,0xa5,0xfd,0x08,0x6d,0xf7] + vcvttpd2qqs %xmm23, %xmm22 + +// CHECK: vcvttpd2qqs %xmm23, %xmm22 {%k7} +// CHECK: encoding: [0x62,0xa5,0xfd,0x0f,0x6d,0xf7] + vcvttpd2qqs %xmm23, %xmm22 {%k7} + +// CHECK: vcvttpd2qqs %xmm23, %xmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xa5,0xfd,0x8f,0x6d,0xf7] + vcvttpd2qqs %xmm23, %xmm22 {%k7} {z} + +// CHECK: vcvttpd2qqs %ymm23, %ymm22 +// CHECK: encoding: [0x62,0xa5,0xfd,0x28,0x6d,0xf7] + vcvttpd2qqs %ymm23, %ymm22 + +// CHECK: vcvttpd2qqs {sae}, %ymm23, %ymm22 +// CHECK: encoding: [0x62,0xa5,0xf9,0x18,0x6d,0xf7] + vcvttpd2qqs {sae}, %ymm23, %ymm22 + +// CHECK: vcvttpd2qqs %ymm23, %ymm22 {%k7} +// CHECK: encoding: [0x62,0xa5,0xfd,0x2f,0x6d,0xf7] + vcvttpd2qqs %ymm23, %ymm22 {%k7} + +// CHECK: vcvttpd2qqs {sae}, %ymm23, %ymm22 {%k7} {z} +// CHECK: encoding: [0x62,0xa5,0xf9,0x9f,0x6d,0xf7] + vcvttpd2qqs {sae}, %ymm23, %ymm22 {%k7} {z} + +// CHECK: vcvttpd2qqs %zmm23, %zmm22 +// CHECK: encoding: [0x62,0xa5,0xfd,0x48,0x6d,0xf7] + vcvttpd2qqs %zmm23, %zmm22 + +// CHECK: vcvttpd2qqs {sae}, %zmm23, %zmm22 +// CHECK: encoding: [0x62,0xa5,0xfd,0x18,0x6d,0xf7] + vcvttpd2qqs {sae}, %zmm23, %zmm22 + +// CHECK: vcvttpd2qqs %zmm23, %zmm22 {%k7} +// CHECK: encoding: [0x62,0xa5,0xfd,0x4f,0x6d,0xf7] + vcvttpd2qqs %zmm23, %zmm22 {%k7} + +// CHECK: vcvttpd2qqs {sae}, %zmm23, %zmm22 {%k7} {z} +// CHECK: encoding: 
[0x62,0xa5,0xfd,0x9f,0x6d,0xf7] + vcvttpd2qqs {sae}, %zmm23, %zmm22 {%k7} {z} + +// CHECK: vcvttpd2qqs 268435456(%rbp,%r14,8), %xmm22 +// CHECK: encoding: [0x62,0xa5,0xfd,0x08,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttpd2qqs 268435456(%rbp,%r14,8), %xmm22 + +// CHECK: vcvttpd2qqs 291(%r8,%rax,4), %xmm22 {%k7} +// CHECK: encoding: [0x62,0xc5,0xfd,0x0f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttpd2qqs 291(%r8,%rax,4), %xmm22 {%k7} + +// CHECK: vcvttpd2qqs (%rip){1to2}, %xmm22 +// CHECK: encoding: [0x62,0xe5,0xfd,0x18,0x6d,0x35,0x00,0x00,0x00,0x00] + vcvttpd2qqs (%rip){1to2}, %xmm22 + +// CHECK: vcvttpd2qqs -512(,%rbp,2), %xmm22 +// CHECK: encoding: [0x62,0xe5,0xfd,0x08,0x6d,0x34,0x6d,0x00,0xfe,0xff,0xff] + vcvttpd2qqs -512(,%rbp,2), %xmm22 + +// CHECK: vcvttpd2qqs 2032(%rcx), %xmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0xfd,0x8f,0x6d,0x71,0x7f] + vcvttpd2qqs 2032(%rcx), %xmm22 {%k7} {z} + +// CHECK: vcvttpd2qqs -1024(%rdx){1to2}, %xmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0xfd,0x9f,0x6d,0x72,0x80] + vcvttpd2qqs -1024(%rdx){1to2}, %xmm22 {%k7} {z} + +// CHECK: vcvttpd2qqs 268435456(%rbp,%r14,8), %ymm22 +// CHECK: encoding: [0x62,0xa5,0xfd,0x28,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttpd2qqs 268435456(%rbp,%r14,8), %ymm22 + +// CHECK: vcvttpd2qqs 291(%r8,%rax,4), %ymm22 {%k7} +// CHECK: encoding: [0x62,0xc5,0xfd,0x2f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttpd2qqs 291(%r8,%rax,4), %ymm22 {%k7} + +// CHECK: vcvttpd2qqs (%rip){1to4}, %ymm22 +// CHECK: encoding: [0x62,0xe5,0xfd,0x38,0x6d,0x35,0x00,0x00,0x00,0x00] + vcvttpd2qqs (%rip){1to4}, %ymm22 + +// CHECK: vcvttpd2qqs -1024(,%rbp,2), %ymm22 +// CHECK: encoding: [0x62,0xe5,0xfd,0x28,0x6d,0x34,0x6d,0x00,0xfc,0xff,0xff] + vcvttpd2qqs -1024(,%rbp,2), %ymm22 + +// CHECK: vcvttpd2qqs 4064(%rcx), %ymm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0xfd,0xaf,0x6d,0x71,0x7f] + vcvttpd2qqs 4064(%rcx), %ymm22 {%k7} {z} + +// CHECK: vcvttpd2qqs -1024(%rdx){1to4}, %ymm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0xfd,0xbf,0x6d,0x72,0x80] + vcvttpd2qqs -1024(%rdx){1to4}, %ymm22 {%k7} {z} + +// CHECK: vcvttpd2qqs 268435456(%rbp,%r14,8), %zmm22 +// CHECK: encoding: [0x62,0xa5,0xfd,0x48,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttpd2qqs 268435456(%rbp,%r14,8), %zmm22 + +// CHECK: vcvttpd2qqs 291(%r8,%rax,4), %zmm22 {%k7} +// CHECK: encoding: [0x62,0xc5,0xfd,0x4f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttpd2qqs 291(%r8,%rax,4), %zmm22 {%k7} + +// CHECK: vcvttpd2qqs (%rip){1to8}, %zmm22 +// CHECK: encoding: [0x62,0xe5,0xfd,0x58,0x6d,0x35,0x00,0x00,0x00,0x00] + vcvttpd2qqs (%rip){1to8}, %zmm22 + +// CHECK: vcvttpd2qqs -2048(,%rbp,2), %zmm22 +// CHECK: encoding: [0x62,0xe5,0xfd,0x48,0x6d,0x34,0x6d,0x00,0xf8,0xff,0xff] + vcvttpd2qqs -2048(,%rbp,2), %zmm22 + +// CHECK: vcvttpd2qqs 8128(%rcx), %zmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0xfd,0xcf,0x6d,0x71,0x7f] + vcvttpd2qqs 8128(%rcx), %zmm22 {%k7} {z} + +// CHECK: vcvttpd2qqs -1024(%rdx){1to8}, %zmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0xfd,0xdf,0x6d,0x72,0x80] + vcvttpd2qqs -1024(%rdx){1to8}, %zmm22 {%k7} {z} + +// CHECK: vcvttpd2udqs %xmm23, %xmm22 +// CHECK: encoding: [0x62,0xa5,0xfc,0x08,0x6c,0xf7] + vcvttpd2udqs %xmm23, %xmm22 + +// CHECK: vcvttpd2udqs %xmm23, %xmm22 {%k7} +// CHECK: encoding: [0x62,0xa5,0xfc,0x0f,0x6c,0xf7] + vcvttpd2udqs %xmm23, %xmm22 {%k7} + +// CHECK: vcvttpd2udqs %xmm23, %xmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xa5,0xfc,0x8f,0x6c,0xf7] + vcvttpd2udqs %xmm23, %xmm22 {%k7} {z} + +// CHECK: vcvttpd2udqs %ymm23, %xmm22 +// CHECK: encoding: 
[0x62,0xa5,0xfc,0x28,0x6c,0xf7] + vcvttpd2udqs %ymm23, %xmm22 + +// CHECK: vcvttpd2udqs {sae}, %ymm23, %xmm22 +// CHECK: encoding: [0x62,0xa5,0xf8,0x18,0x6c,0xf7] + vcvttpd2udqs {sae}, %ymm23, %xmm22 + +// CHECK: vcvttpd2udqs %ymm23, %xmm22 {%k7} +// CHECK: encoding: [0x62,0xa5,0xfc,0x2f,0x6c,0xf7] + vcvttpd2udqs %ymm23, %xmm22 {%k7} + +// CHECK: vcvttpd2udqs {sae}, %ymm23, %xmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xa5,0xf8,0x9f,0x6c,0xf7] + vcvttpd2udqs {sae}, %ymm23, %xmm22 {%k7} {z} + +// CHECK: vcvttpd2udqs %zmm23, %ymm22 +// CHECK: encoding: [0x62,0xa5,0xfc,0x48,0x6c,0xf7] + vcvttpd2udqs %zmm23, %ymm22 + +// CHECK: vcvttpd2udqs {sae}, %zmm23, %ymm22 +// CHECK: encoding: [0x62,0xa5,0xfc,0x18,0x6c,0xf7] + vcvttpd2udqs {sae}, %zmm23, %ymm22 + +// CHECK: vcvttpd2udqs %zmm23, %ymm22 {%k7} +// CHECK: encoding: [0x62,0xa5,0xfc,0x4f,0x6c,0xf7] + vcvttpd2udqs %zmm23, %ymm22 {%k7} + +// CHECK: vcvttpd2udqs {sae}, %zmm23, %ymm22 {%k7} {z} +// CHECK: encoding: [0x62,0xa5,0xfc,0x9f,0x6c,0xf7] + vcvttpd2udqs {sae}, %zmm23, %ymm22 {%k7} {z} + +// CHECK: vcvttpd2udqsx 268435456(%rbp,%r14,8), %xmm22 +// CHECK: encoding: [0x62,0xa5,0xfc,0x08,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttpd2udqsx 268435456(%rbp,%r14,8), %xmm22 + +// CHECK: vcvttpd2udqsx 291(%r8,%rax,4), %xmm22 {%k7} +// CHECK: encoding: [0x62,0xc5,0xfc,0x0f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttpd2udqsx 291(%r8,%rax,4), %xmm22 {%k7} + +// CHECK: vcvttpd2udqs (%rip){1to2}, %xmm22 +// CHECK: encoding: [0x62,0xe5,0xfc,0x18,0x6c,0x35,0x00,0x00,0x00,0x00] + vcvttpd2udqs (%rip){1to2}, %xmm22 + +// CHECK: vcvttpd2udqsx -512(,%rbp,2), %xmm22 +// CHECK: encoding: [0x62,0xe5,0xfc,0x08,0x6c,0x34,0x6d,0x00,0xfe,0xff,0xff] + vcvttpd2udqsx -512(,%rbp,2), %xmm22 + +// CHECK: vcvttpd2udqsx 2032(%rcx), %xmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0xfc,0x8f,0x6c,0x71,0x7f] + vcvttpd2udqsx 2032(%rcx), %xmm22 {%k7} {z} + +// CHECK: vcvttpd2udqs -1024(%rdx){1to2}, %xmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0xfc,0x9f,0x6c,0x72,0x80] + vcvttpd2udqs -1024(%rdx){1to2}, %xmm22 {%k7} {z} + +// CHECK: vcvttpd2udqs (%rip){1to4}, %xmm22 +// CHECK: encoding: [0x62,0xe5,0xfc,0x38,0x6c,0x35,0x00,0x00,0x00,0x00] + vcvttpd2udqs (%rip){1to4}, %xmm22 + +// CHECK: vcvttpd2udqsy -1024(,%rbp,2), %xmm22 +// CHECK: encoding: [0x62,0xe5,0xfc,0x28,0x6c,0x34,0x6d,0x00,0xfc,0xff,0xff] + vcvttpd2udqsy -1024(,%rbp,2), %xmm22 + +// CHECK: vcvttpd2udqsy 4064(%rcx), %xmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0xfc,0xaf,0x6c,0x71,0x7f] + vcvttpd2udqsy 4064(%rcx), %xmm22 {%k7} {z} + +// CHECK: vcvttpd2udqs -1024(%rdx){1to4}, %xmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0xfc,0xbf,0x6c,0x72,0x80] + vcvttpd2udqs -1024(%rdx){1to4}, %xmm22 {%k7} {z} + +// CHECK: vcvttpd2udqs 268435456(%rbp,%r14,8), %ymm22 +// CHECK: encoding: [0x62,0xa5,0xfc,0x48,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttpd2udqs 268435456(%rbp,%r14,8), %ymm22 + +// CHECK: vcvttpd2udqs 291(%r8,%rax,4), %ymm22 {%k7} +// CHECK: encoding: [0x62,0xc5,0xfc,0x4f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttpd2udqs 291(%r8,%rax,4), %ymm22 {%k7} + +// CHECK: vcvttpd2udqs (%rip){1to8}, %ymm22 +// CHECK: encoding: [0x62,0xe5,0xfc,0x58,0x6c,0x35,0x00,0x00,0x00,0x00] + vcvttpd2udqs (%rip){1to8}, %ymm22 + +// CHECK: vcvttpd2udqs -2048(,%rbp,2), %ymm22 +// CHECK: encoding: [0x62,0xe5,0xfc,0x48,0x6c,0x34,0x6d,0x00,0xf8,0xff,0xff] + vcvttpd2udqs -2048(,%rbp,2), %ymm22 + +// CHECK: vcvttpd2udqs 8128(%rcx), %ymm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0xfc,0xcf,0x6c,0x71,0x7f] + vcvttpd2udqs 8128(%rcx), %ymm22 {%k7} 
{z} + +// CHECK: vcvttpd2udqs -1024(%rdx){1to8}, %ymm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0xfc,0xdf,0x6c,0x72,0x80] + vcvttpd2udqs -1024(%rdx){1to8}, %ymm22 {%k7} {z} + +// CHECK: vcvttpd2uqqs %xmm23, %xmm22 +// CHECK: encoding: [0x62,0xa5,0xfd,0x08,0x6c,0xf7] + vcvttpd2uqqs %xmm23, %xmm22 + +// CHECK: vcvttpd2uqqs %xmm23, %xmm22 {%k7} +// CHECK: encoding: [0x62,0xa5,0xfd,0x0f,0x6c,0xf7] + vcvttpd2uqqs %xmm23, %xmm22 {%k7} + +// CHECK: vcvttpd2uqqs %xmm23, %xmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xa5,0xfd,0x8f,0x6c,0xf7] + vcvttpd2uqqs %xmm23, %xmm22 {%k7} {z} + +// CHECK: vcvttpd2uqqs %ymm23, %ymm22 +// CHECK: encoding: [0x62,0xa5,0xfd,0x28,0x6c,0xf7] + vcvttpd2uqqs %ymm23, %ymm22 + +// CHECK: vcvttpd2uqqs {sae}, %ymm23, %ymm22 +// CHECK: encoding: [0x62,0xa5,0xf9,0x18,0x6c,0xf7] + vcvttpd2uqqs {sae}, %ymm23, %ymm22 + +// CHECK: vcvttpd2uqqs %ymm23, %ymm22 {%k7} +// CHECK: encoding: [0x62,0xa5,0xfd,0x2f,0x6c,0xf7] + vcvttpd2uqqs %ymm23, %ymm22 {%k7} + +// CHECK: vcvttpd2uqqs {sae}, %ymm23, %ymm22 {%k7} {z} +// CHECK: encoding: [0x62,0xa5,0xf9,0x9f,0x6c,0xf7] + vcvttpd2uqqs {sae}, %ymm23, %ymm22 {%k7} {z} + +// CHECK: vcvttpd2uqqs %zmm23, %zmm22 +// CHECK: encoding: [0x62,0xa5,0xfd,0x48,0x6c,0xf7] + vcvttpd2uqqs %zmm23, %zmm22 + +// CHECK: vcvttpd2uqqs {sae}, %zmm23, %zmm22 +// CHECK: encoding: [0x62,0xa5,0xfd,0x18,0x6c,0xf7] + vcvttpd2uqqs {sae}, %zmm23, %zmm22 + +// CHECK: vcvttpd2uqqs %zmm23, %zmm22 {%k7} +// CHECK: encoding: [0x62,0xa5,0xfd,0x4f,0x6c,0xf7] + vcvttpd2uqqs %zmm23, %zmm22 {%k7} + +// CHECK: vcvttpd2uqqs {sae}, %zmm23, %zmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xa5,0xfd,0x9f,0x6c,0xf7] + vcvttpd2uqqs {sae}, %zmm23, %zmm22 {%k7} {z} + +// CHECK: vcvttpd2uqqs 268435456(%rbp,%r14,8), %xmm22 +// CHECK: encoding: [0x62,0xa5,0xfd,0x08,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttpd2uqqs 268435456(%rbp,%r14,8), %xmm22 + +// CHECK: vcvttpd2uqqs 291(%r8,%rax,4), %xmm22 {%k7} +// CHECK: encoding: [0x62,0xc5,0xfd,0x0f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttpd2uqqs 291(%r8,%rax,4), %xmm22 {%k7} + +// CHECK: vcvttpd2uqqs (%rip){1to2}, %xmm22 +// CHECK: encoding: [0x62,0xe5,0xfd,0x18,0x6c,0x35,0x00,0x00,0x00,0x00] + vcvttpd2uqqs (%rip){1to2}, %xmm22 + +// CHECK: vcvttpd2uqqs -512(,%rbp,2), %xmm22 +// CHECK: encoding: [0x62,0xe5,0xfd,0x08,0x6c,0x34,0x6d,0x00,0xfe,0xff,0xff] + vcvttpd2uqqs -512(,%rbp,2), %xmm22 + +// CHECK: vcvttpd2uqqs 2032(%rcx), %xmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0xfd,0x8f,0x6c,0x71,0x7f] + vcvttpd2uqqs 2032(%rcx), %xmm22 {%k7} {z} + +// CHECK: vcvttpd2uqqs -1024(%rdx){1to2}, %xmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0xfd,0x9f,0x6c,0x72,0x80] + vcvttpd2uqqs -1024(%rdx){1to2}, %xmm22 {%k7} {z} + +// CHECK: vcvttpd2uqqs 268435456(%rbp,%r14,8), %ymm22 +// CHECK: encoding: [0x62,0xa5,0xfd,0x28,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttpd2uqqs 268435456(%rbp,%r14,8), %ymm22 + +// CHECK: vcvttpd2uqqs 291(%r8,%rax,4), %ymm22 {%k7} +// CHECK: encoding: [0x62,0xc5,0xfd,0x2f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttpd2uqqs 291(%r8,%rax,4), %ymm22 {%k7} + +// CHECK: vcvttpd2uqqs (%rip){1to4}, %ymm22 +// CHECK: encoding: [0x62,0xe5,0xfd,0x38,0x6c,0x35,0x00,0x00,0x00,0x00] + vcvttpd2uqqs (%rip){1to4}, %ymm22 + +// CHECK: vcvttpd2uqqs -1024(,%rbp,2), %ymm22 +// CHECK: encoding: [0x62,0xe5,0xfd,0x28,0x6c,0x34,0x6d,0x00,0xfc,0xff,0xff] + vcvttpd2uqqs -1024(,%rbp,2), %ymm22 + +// CHECK: vcvttpd2uqqs 4064(%rcx), %ymm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0xfd,0xaf,0x6c,0x71,0x7f] + vcvttpd2uqqs 4064(%rcx), %ymm22 {%k7} {z} + +// CHECK: 
vcvttpd2uqqs -1024(%rdx){1to4}, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0xfd,0xbf,0x6c,0x72,0x80]
+ vcvttpd2uqqs -1024(%rdx){1to4}, %ymm22 {%k7} {z}
+
+// CHECK: vcvttpd2uqqs 268435456(%rbp,%r14,8), %zmm22
+// CHECK: encoding: [0x62,0xa5,0xfd,0x48,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttpd2uqqs 268435456(%rbp,%r14,8), %zmm22
+
+// CHECK: vcvttpd2uqqs 291(%r8,%rax,4), %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0xfd,0x4f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttpd2uqqs 291(%r8,%rax,4), %zmm22 {%k7}
+
+// CHECK: vcvttpd2uqqs (%rip){1to8}, %zmm22
+// CHECK: encoding: [0x62,0xe5,0xfd,0x58,0x6c,0x35,0x00,0x00,0x00,0x00]
+ vcvttpd2uqqs (%rip){1to8}, %zmm22
+
+// CHECK: vcvttpd2uqqs -2048(,%rbp,2), %zmm22
+// CHECK: encoding: [0x62,0xe5,0xfd,0x48,0x6c,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttpd2uqqs -2048(,%rbp,2), %zmm22
+
+// CHECK: vcvttpd2uqqs 8128(%rcx), %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0xfd,0xcf,0x6c,0x71,0x7f]
+ vcvttpd2uqqs 8128(%rcx), %zmm22 {%k7} {z}
+
+// CHECK: vcvttpd2uqqs -1024(%rdx){1to8}, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0xfd,0xdf,0x6c,0x72,0x80]
+ vcvttpd2uqqs -1024(%rdx){1to8}, %zmm22 {%k7} {z}
+
+// CHECK: vcvttps2dqs %xmm23, %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x6d,0xf7]
+ vcvttps2dqs %xmm23, %xmm22
+
+// CHECK: vcvttps2dqs %xmm23, %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x0f,0x6d,0xf7]
+ vcvttps2dqs %xmm23, %xmm22 {%k7}
+
+// CHECK: vcvttps2dqs %xmm23, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x8f,0x6d,0xf7]
+ vcvttps2dqs %xmm23, %xmm22 {%k7} {z}
+
+// CHECK: vcvttps2dqs %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x6d,0xf7]
+ vcvttps2dqs %ymm23, %ymm22
+
+// CHECK: vcvttps2dqs {sae}, %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x78,0x18,0x6d,0xf7]
+ vcvttps2dqs {sae}, %ymm23, %ymm22
+
+// CHECK: vcvttps2dqs %ymm23, %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x2f,0x6d,0xf7]
+ vcvttps2dqs %ymm23, %ymm22 {%k7}
+
+// CHECK: vcvttps2dqs {sae}, %ymm23, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x78,0x9f,0x6d,0xf7]
+ vcvttps2dqs {sae}, %ymm23, %ymm22 {%k7} {z}
+
+// CHECK: vcvttps2dqs %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x6d,0xf7]
+ vcvttps2dqs %zmm23, %zmm22
+
+// CHECK: vcvttps2dqs {sae}, %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x18,0x6d,0xf7]
+ vcvttps2dqs {sae}, %zmm23, %zmm22
+
+// CHECK: vcvttps2dqs %zmm23, %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x4f,0x6d,0xf7]
+ vcvttps2dqs %zmm23, %zmm22 {%k7}
+
+// CHECK: vcvttps2dqs {sae}, %zmm23, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x9f,0x6d,0xf7]
+ vcvttps2dqs {sae}, %zmm23, %zmm22 {%k7} {z}
+
+// CHECK: vcvttps2dqs 268435456(%rbp,%r14,8), %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2dqs 268435456(%rbp,%r14,8), %xmm22
+
+// CHECK: vcvttps2dqs 291(%r8,%rax,4), %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7c,0x0f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2dqs 291(%r8,%rax,4), %xmm22 {%k7}
+
+// CHECK: vcvttps2dqs (%rip){1to4}, %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x18,0x6d,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2dqs (%rip){1to4}, %xmm22
+
+// CHECK: vcvttps2dqs -512(,%rbp,2), %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x08,0x6d,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttps2dqs -512(,%rbp,2), %xmm22
+
+// CHECK: vcvttps2dqs 2032(%rcx), %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x8f,0x6d,0x71,0x7f]
+ vcvttps2dqs 2032(%rcx), %xmm22 {%k7} {z}
+
+// CHECK: vcvttps2dqs -512(%rdx){1to4}, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x9f,0x6d,0x72,0x80]
+ vcvttps2dqs -512(%rdx){1to4}, %xmm22 {%k7} {z}
+
+// CHECK: vcvttps2dqs 268435456(%rbp,%r14,8), %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2dqs 268435456(%rbp,%r14,8), %ymm22
+
+// CHECK: vcvttps2dqs 291(%r8,%rax,4), %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7c,0x2f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2dqs 291(%r8,%rax,4), %ymm22 {%k7}
+
+// CHECK: vcvttps2dqs (%rip){1to8}, %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x38,0x6d,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2dqs (%rip){1to8}, %ymm22
+
+// CHECK: vcvttps2dqs -1024(,%rbp,2), %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x28,0x6d,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttps2dqs -1024(,%rbp,2), %ymm22
+
+// CHECK: vcvttps2dqs 4064(%rcx), %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xaf,0x6d,0x71,0x7f]
+ vcvttps2dqs 4064(%rcx), %ymm22 {%k7} {z}
+
+// CHECK: vcvttps2dqs -512(%rdx){1to8}, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xbf,0x6d,0x72,0x80]
+ vcvttps2dqs -512(%rdx){1to8}, %ymm22 {%k7} {z}
+
+// CHECK: vcvttps2dqs 268435456(%rbp,%r14,8), %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2dqs 268435456(%rbp,%r14,8), %zmm22
+
+// CHECK: vcvttps2dqs 291(%r8,%rax,4), %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7c,0x4f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2dqs 291(%r8,%rax,4), %zmm22 {%k7}
+
+// CHECK: vcvttps2dqs (%rip){1to16}, %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x58,0x6d,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2dqs (%rip){1to16}, %zmm22
+
+// CHECK: vcvttps2dqs -2048(,%rbp,2), %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x48,0x6d,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttps2dqs -2048(,%rbp,2), %zmm22
+
+// CHECK: vcvttps2dqs 8128(%rcx), %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xcf,0x6d,0x71,0x7f]
+ vcvttps2dqs 8128(%rcx), %zmm22 {%k7} {z}
+
+// CHECK: vcvttps2dqs -512(%rdx){1to16}, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xdf,0x6d,0x72,0x80]
+ vcvttps2dqs -512(%rdx){1to16}, %zmm22 {%k7} {z}
+
+// CHECK: vcvttps2qqs %xmm23, %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x6d,0xf7]
+ vcvttps2qqs %xmm23, %xmm22
+
+// CHECK: vcvttps2qqs %xmm23, %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x0f,0x6d,0xf7]
+ vcvttps2qqs %xmm23, %xmm22 {%k7}
+
+// CHECK: vcvttps2qqs %xmm23, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x8f,0x6d,0xf7]
+ vcvttps2qqs %xmm23, %xmm22 {%k7} {z}
+
+// CHECK: vcvttps2qqs %xmm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x6d,0xf7]
+ vcvttps2qqs %xmm23, %ymm22
+
+// CHECK: vcvttps2qqs {sae}, %xmm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x79,0x18,0x6d,0xf7]
+ vcvttps2qqs {sae}, %xmm23, %ymm22
+
+// CHECK: vcvttps2qqs %xmm23, %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x2f,0x6d,0xf7]
+ vcvttps2qqs %xmm23, %ymm22 {%k7}
+
+// CHECK: vcvttps2qqs {sae}, %xmm23, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x79,0x9f,0x6d,0xf7]
+ vcvttps2qqs {sae}, %xmm23, %ymm22 {%k7} {z}
+
+// CHECK: vcvttps2qqs %ymm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x6d,0xf7]
+ vcvttps2qqs %ymm23, %zmm22
+
+// CHECK: vcvttps2qqs {sae}, %ymm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x18,0x6d,0xf7]
+ vcvttps2qqs {sae}, %ymm23, %zmm22
+
+// CHECK: vcvttps2qqs %ymm23, %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x4f,0x6d,0xf7]
+ vcvttps2qqs %ymm23, %zmm22 {%k7}
+
+// CHECK: vcvttps2qqs {sae}, %ymm23, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x9f,0x6d,0xf7]
+ vcvttps2qqs {sae}, %ymm23, %zmm22 {%k7} {z}
+
+// CHECK: vcvttps2qqs 268435456(%rbp,%r14,8), %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2qqs 268435456(%rbp,%r14,8), %xmm22
+
+// CHECK: vcvttps2qqs 291(%r8,%rax,4), %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7d,0x0f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2qqs 291(%r8,%rax,4), %xmm22 {%k7}
+
+// CHECK: vcvttps2qqs (%rip){1to2}, %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x18,0x6d,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2qqs (%rip){1to2}, %xmm22
+
+// CHECK: vcvttps2qqs -256(,%rbp,2), %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x08,0x6d,0x34,0x6d,0x00,0xff,0xff,0xff]
+ vcvttps2qqs -256(,%rbp,2), %xmm22
+
+// CHECK: vcvttps2qqs 1016(%rcx), %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x8f,0x6d,0x71,0x7f]
+ vcvttps2qqs 1016(%rcx), %xmm22 {%k7} {z}
+
+// CHECK: vcvttps2qqs -512(%rdx){1to2}, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x9f,0x6d,0x72,0x80]
+ vcvttps2qqs -512(%rdx){1to2}, %xmm22 {%k7} {z}
+
+// CHECK: vcvttps2qqs 268435456(%rbp,%r14,8), %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2qqs 268435456(%rbp,%r14,8), %ymm22
+
+// CHECK: vcvttps2qqs 291(%r8,%rax,4), %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7d,0x2f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2qqs 291(%r8,%rax,4), %ymm22 {%k7}
+
+// CHECK: vcvttps2qqs (%rip){1to4}, %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x38,0x6d,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2qqs (%rip){1to4}, %ymm22
+
+// CHECK: vcvttps2qqs -512(,%rbp,2), %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x28,0x6d,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttps2qqs -512(,%rbp,2), %ymm22
+
+// CHECK: vcvttps2qqs 2032(%rcx), %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xaf,0x6d,0x71,0x7f]
+ vcvttps2qqs 2032(%rcx), %ymm22 {%k7} {z}
+
+// CHECK: vcvttps2qqs -512(%rdx){1to4}, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xbf,0x6d,0x72,0x80]
+ vcvttps2qqs -512(%rdx){1to4}, %ymm22 {%k7} {z}
+
+// CHECK: vcvttps2qqs 268435456(%rbp,%r14,8), %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2qqs 268435456(%rbp,%r14,8), %zmm22
+
+// CHECK: vcvttps2qqs 291(%r8,%rax,4), %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7d,0x4f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2qqs 291(%r8,%rax,4), %zmm22 {%k7}
+
+// CHECK: vcvttps2qqs (%rip){1to8}, %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x58,0x6d,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2qqs (%rip){1to8}, %zmm22
+
+// CHECK: vcvttps2qqs -1024(,%rbp,2), %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x48,0x6d,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttps2qqs -1024(,%rbp,2), %zmm22
+
+// CHECK: vcvttps2qqs 4064(%rcx), %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xcf,0x6d,0x71,0x7f]
+ vcvttps2qqs 4064(%rcx), %zmm22 {%k7} {z}
+
+// CHECK: vcvttps2qqs -512(%rdx){1to8}, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xdf,0x6d,0x72,0x80]
+ vcvttps2qqs -512(%rdx){1to8}, %zmm22 {%k7} {z}
+
+// CHECK: vcvttps2udqs %xmm23, %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x6c,0xf7]
+ vcvttps2udqs %xmm23, %xmm22
+
+// CHECK: vcvttps2udqs %xmm23, %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x0f,0x6c,0xf7]
+ vcvttps2udqs %xmm23, %xmm22 {%k7}
+
+// CHECK: vcvttps2udqs %xmm23, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x8f,0x6c,0xf7]
+ vcvttps2udqs %xmm23, %xmm22 {%k7} {z}
+
+// CHECK: vcvttps2udqs %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x6c,0xf7]
+ vcvttps2udqs %ymm23, %ymm22
+
+// CHECK: vcvttps2udqs {sae}, %ymm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x78,0x18,0x6c,0xf7]
+ vcvttps2udqs {sae}, %ymm23, %ymm22
+
+// CHECK: vcvttps2udqs %ymm23, %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x2f,0x6c,0xf7]
+ vcvttps2udqs %ymm23, %ymm22 {%k7}
+
+// CHECK: vcvttps2udqs {sae}, %ymm23, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x78,0x9f,0x6c,0xf7]
+ vcvttps2udqs {sae}, %ymm23, %ymm22 {%k7} {z}
+
+// CHECK: vcvttps2udqs %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x6c,0xf7]
+ vcvttps2udqs %zmm23, %zmm22
+
+// CHECK: vcvttps2udqs {sae}, %zmm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x18,0x6c,0xf7]
+ vcvttps2udqs {sae}, %zmm23, %zmm22
+
+// CHECK: vcvttps2udqs %zmm23, %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x4f,0x6c,0xf7]
+ vcvttps2udqs %zmm23, %zmm22 {%k7}
+
+// CHECK: vcvttps2udqs {sae}, %zmm23, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7c,0x9f,0x6c,0xf7]
+ vcvttps2udqs {sae}, %zmm23, %zmm22 {%k7} {z}
+
+// CHECK: vcvttps2udqs 268435456(%rbp,%r14,8), %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2udqs 268435456(%rbp,%r14,8), %xmm22
+
+// CHECK: vcvttps2udqs 291(%r8,%rax,4), %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7c,0x0f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2udqs 291(%r8,%rax,4), %xmm22 {%k7}
+
+// CHECK: vcvttps2udqs (%rip){1to4}, %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x18,0x6c,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2udqs (%rip){1to4}, %xmm22
+
+// CHECK: vcvttps2udqs -512(,%rbp,2), %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x08,0x6c,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttps2udqs -512(,%rbp,2), %xmm22
+
+// CHECK: vcvttps2udqs 2032(%rcx), %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x8f,0x6c,0x71,0x7f]
+ vcvttps2udqs 2032(%rcx), %xmm22 {%k7} {z}
+
+// CHECK: vcvttps2udqs -512(%rdx){1to4}, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x9f,0x6c,0x72,0x80]
+ vcvttps2udqs -512(%rdx){1to4}, %xmm22 {%k7} {z}
+
+// CHECK: vcvttps2udqs 268435456(%rbp,%r14,8), %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2udqs 268435456(%rbp,%r14,8), %ymm22
+
+// CHECK: vcvttps2udqs 291(%r8,%rax,4), %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7c,0x2f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2udqs 291(%r8,%rax,4), %ymm22 {%k7}
+
+// CHECK: vcvttps2udqs (%rip){1to8}, %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x38,0x6c,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2udqs (%rip){1to8}, %ymm22
+
+// CHECK: vcvttps2udqs -1024(,%rbp,2), %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x28,0x6c,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttps2udqs -1024(,%rbp,2), %ymm22
+
+// CHECK: vcvttps2udqs 4064(%rcx), %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xaf,0x6c,0x71,0x7f]
+ vcvttps2udqs 4064(%rcx), %ymm22 {%k7} {z}
+
+// CHECK: vcvttps2udqs -512(%rdx){1to8}, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xbf,0x6c,0x72,0x80]
+ vcvttps2udqs -512(%rdx){1to8}, %ymm22 {%k7} {z}
+
+// CHECK: vcvttps2udqs 268435456(%rbp,%r14,8), %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2udqs 268435456(%rbp,%r14,8), %zmm22
+
+// CHECK: vcvttps2udqs 291(%r8,%rax,4), %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7c,0x4f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2udqs 291(%r8,%rax,4), %zmm22 {%k7}
+
+// CHECK: vcvttps2udqs (%rip){1to16}, %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x58,0x6c,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2udqs (%rip){1to16}, %zmm22
+
+// CHECK: vcvttps2udqs -2048(,%rbp,2), %zmm22
+// CHECK: encoding: [0x62,0xe5,0x7c,0x48,0x6c,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttps2udqs -2048(,%rbp,2), %zmm22
+
+// CHECK: vcvttps2udqs 8128(%rcx), %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xcf,0x6c,0x71,0x7f]
+ vcvttps2udqs 8128(%rcx), %zmm22 {%k7} {z}
+
+// CHECK: vcvttps2udqs -512(%rdx){1to16}, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xdf,0x6c,0x72,0x80]
+ vcvttps2udqs -512(%rdx){1to16}, %zmm22 {%k7} {z}
+
+// CHECK: vcvttps2uqqs %xmm23, %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x6c,0xf7]
+ vcvttps2uqqs %xmm23, %xmm22
+
+// CHECK: vcvttps2uqqs %xmm23, %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x0f,0x6c,0xf7]
+ vcvttps2uqqs %xmm23, %xmm22 {%k7}
+
+// CHECK: vcvttps2uqqs %xmm23, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x8f,0x6c,0xf7]
+ vcvttps2uqqs %xmm23, %xmm22 {%k7} {z}
+
+// CHECK: vcvttps2uqqs %xmm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x6c,0xf7]
+ vcvttps2uqqs %xmm23, %ymm22
+
+// CHECK: vcvttps2uqqs {sae}, %xmm23, %ymm22
+// CHECK: encoding: [0x62,0xa5,0x79,0x18,0x6c,0xf7]
+ vcvttps2uqqs {sae}, %xmm23, %ymm22
+
+// CHECK: vcvttps2uqqs %xmm23, %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x2f,0x6c,0xf7]
+ vcvttps2uqqs %xmm23, %ymm22 {%k7}
+
+// CHECK: vcvttps2uqqs {sae}, %xmm23, %ymm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x79,0x9f,0x6c,0xf7]
+ vcvttps2uqqs {sae}, %xmm23, %ymm22 {%k7} {z}
+
+// CHECK: vcvttps2uqqs %ymm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x6c,0xf7]
+ vcvttps2uqqs %ymm23, %zmm22
+
+// CHECK: vcvttps2uqqs {sae}, %ymm23, %zmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x18,0x6c,0xf7]
+ vcvttps2uqqs {sae}, %ymm23, %zmm22
+
+// CHECK: vcvttps2uqqs %ymm23, %zmm22 {%k7}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x4f,0x6c,0xf7]
+ vcvttps2uqqs %ymm23, %zmm22 {%k7}
+
+// CHECK: vcvttps2uqqs {sae}, %ymm23, %zmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x9f,0x6c,0xf7]
+ vcvttps2uqqs {sae}, %ymm23, %zmm22 {%k7} {z}
+
+// CHECK: vcvttps2uqqs 268435456(%rbp,%r14,8), %xmm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2uqqs 268435456(%rbp,%r14,8), %xmm22
+
+// CHECK: vcvttps2uqqs 291(%r8,%rax,4), %xmm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7d,0x0f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2uqqs 291(%r8,%rax,4), %xmm22 {%k7}
+
+// CHECK: vcvttps2uqqs (%rip){1to2}, %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x18,0x6c,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2uqqs (%rip){1to2}, %xmm22
+
+// CHECK: vcvttps2uqqs -256(,%rbp,2), %xmm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x08,0x6c,0x34,0x6d,0x00,0xff,0xff,0xff]
+ vcvttps2uqqs -256(,%rbp,2), %xmm22
+
+// CHECK: vcvttps2uqqs 1016(%rcx), %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x8f,0x6c,0x71,0x7f]
+ vcvttps2uqqs 1016(%rcx), %xmm22 {%k7} {z}
+
+// CHECK: vcvttps2uqqs -512(%rdx){1to2}, %xmm22 {%k7} {z}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x9f,0x6c,0x72,0x80]
+ vcvttps2uqqs -512(%rdx){1to2}, %xmm22 {%k7} {z}
+
+// CHECK: vcvttps2uqqs 268435456(%rbp,%r14,8), %ymm22
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2uqqs 268435456(%rbp,%r14,8), %ymm22
+
+// CHECK: vcvttps2uqqs 291(%r8,%rax,4), %ymm22 {%k7}
+// CHECK: encoding: [0x62,0xc5,0x7d,0x2f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2uqqs 291(%r8,%rax,4), %ymm22 {%k7}
+
+// CHECK: vcvttps2uqqs (%rip){1to4}, %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x38,0x6c,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2uqqs (%rip){1to4}, %ymm22
+
+// CHECK: vcvttps2uqqs -512(,%rbp,2), %ymm22
+// CHECK: encoding: [0x62,0xe5,0x7d,0x28,0x6c,0x34,0x6d,0x00,0xfe,0xff,0xff] + vcvttps2uqqs -512(,%rbp,2), %ymm22 + +// CHECK: vcvttps2uqqs 2032(%rcx), %ymm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0x7d,0xaf,0x6c,0x71,0x7f] + vcvttps2uqqs 2032(%rcx), %ymm22 {%k7} {z} + +// CHECK: vcvttps2uqqs -512(%rdx){1to4}, %ymm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0x7d,0xbf,0x6c,0x72,0x80] + vcvttps2uqqs -512(%rdx){1to4}, %ymm22 {%k7} {z} + +// CHECK: vcvttps2uqqs 268435456(%rbp,%r14,8), %zmm22 +// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttps2uqqs 268435456(%rbp,%r14,8), %zmm22 + +// CHECK: vcvttps2uqqs 291(%r8,%rax,4), %zmm22 {%k7} +// CHECK: encoding: [0x62,0xc5,0x7d,0x4f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttps2uqqs 291(%r8,%rax,4), %zmm22 {%k7} + +// CHECK: vcvttps2uqqs (%rip){1to8}, %zmm22 +// CHECK: encoding: [0x62,0xe5,0x7d,0x58,0x6c,0x35,0x00,0x00,0x00,0x00] + vcvttps2uqqs (%rip){1to8}, %zmm22 + +// CHECK: vcvttps2uqqs -1024(,%rbp,2), %zmm22 +// CHECK: encoding: [0x62,0xe5,0x7d,0x48,0x6c,0x34,0x6d,0x00,0xfc,0xff,0xff] + vcvttps2uqqs -1024(,%rbp,2), %zmm22 + +// CHECK: vcvttps2uqqs 4064(%rcx), %zmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0x7d,0xcf,0x6c,0x71,0x7f] + vcvttps2uqqs 4064(%rcx), %zmm22 {%k7} {z} + +// CHECK: vcvttps2uqqs -512(%rdx){1to8}, %zmm22 {%k7} {z} +// CHECK: encoding: [0x62,0xe5,0x7d,0xdf,0x6c,0x72,0x80] + vcvttps2uqqs -512(%rdx){1to8}, %zmm22 {%k7} {z} + diff --git a/llvm/test/MC/X86/avx10_2satcvtds-64-intel.s b/llvm/test/MC/X86/avx10_2satcvtds-64-intel.s new file mode 100644 index 0000000..9e9af84 --- /dev/null +++ b/llvm/test/MC/X86/avx10_2satcvtds-64-intel.s @@ -0,0 +1,1170 @@ +// RUN: llvm-mc -triple x86_64 -x86-asm-syntax=intel -output-asm-variant=1 --show-encoding %s | FileCheck %s + +// CHECK: vcvttsd2sis ecx, xmm22 +// CHECK: encoding: [0x62,0xb5,0x7f,0x08,0x6d,0xce] + vcvttsd2sis ecx, xmm22 + +// CHECK: vcvttsd2sis ecx, xmm22, {sae} +// CHECK: encoding: [0x62,0xb5,0x7f,0x18,0x6d,0xce] + vcvttsd2sis ecx, xmm22, {sae} + +// CHECK: vcvttsd2sis r9, xmm22 +// CHECK: encoding: [0x62,0x35,0xff,0x08,0x6d,0xce] + vcvttsd2sis r9, xmm22 + +// CHECK: vcvttsd2sis r9, xmm22, {sae} +// CHECK: encoding: [0x62,0x35,0xff,0x18,0x6d,0xce] + vcvttsd2sis r9, xmm22, {sae} + +// CHECK: vcvttsd2sis ecx, qword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0xb5,0x7f,0x08,0x6d,0x8c,0xf5,0x00,0x00,0x00,0x10] + vcvttsd2sis ecx, qword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttsd2sis ecx, qword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0xd5,0x7f,0x08,0x6d,0x8c,0x80,0x23,0x01,0x00,0x00] + vcvttsd2sis ecx, qword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttsd2sis ecx, qword ptr [rip] +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0x0d,0x00,0x00,0x00,0x00] + vcvttsd2sis ecx, qword ptr [rip] + +// CHECK: vcvttsd2sis ecx, qword ptr [2*rbp - 256] +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0x0c,0x6d,0x00,0xff,0xff,0xff] + vcvttsd2sis ecx, qword ptr [2*rbp - 256] + +// CHECK: vcvttsd2sis ecx, qword ptr [rcx + 1016] +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0x49,0x7f] + vcvttsd2sis ecx, qword ptr [rcx + 1016] + +// CHECK: vcvttsd2sis ecx, qword ptr [rdx - 1024] +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6d,0x4a,0x80] + vcvttsd2sis ecx, qword ptr [rdx - 1024] + +// CHECK: vcvttsd2sis r9, qword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x35,0xff,0x08,0x6d,0x8c,0xf5,0x00,0x00,0x00,0x10] + vcvttsd2sis r9, qword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttsd2sis r9, qword ptr [r8 + 4*rax + 291] +// CHECK: 
encoding: [0x62,0x55,0xff,0x08,0x6d,0x8c,0x80,0x23,0x01,0x00,0x00] + vcvttsd2sis r9, qword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttsd2sis r9, qword ptr [rip] +// CHECK: encoding: [0x62,0x75,0xff,0x08,0x6d,0x0d,0x00,0x00,0x00,0x00] + vcvttsd2sis r9, qword ptr [rip] + +// CHECK: vcvttsd2sis r9, qword ptr [2*rbp - 256] +// CHECK: encoding: [0x62,0x75,0xff,0x08,0x6d,0x0c,0x6d,0x00,0xff,0xff,0xff] + vcvttsd2sis r9, qword ptr [2*rbp - 256] + +// CHECK: vcvttsd2sis r9, qword ptr [rcx + 1016] +// CHECK: encoding: [0x62,0x75,0xff,0x08,0x6d,0x49,0x7f] + vcvttsd2sis r9, qword ptr [rcx + 1016] + +// CHECK: vcvttsd2sis r9, qword ptr [rdx - 1024] +// CHECK: encoding: [0x62,0x75,0xff,0x08,0x6d,0x4a,0x80] + vcvttsd2sis r9, qword ptr [rdx - 1024] + +// CHECK: vcvttsd2usis ecx, xmm22 +// CHECK: encoding: [0x62,0xb5,0x7f,0x08,0x6c,0xce] + vcvttsd2usis ecx, xmm22 + +// CHECK: vcvttsd2usis ecx, xmm22, {sae} +// CHECK: encoding: [0x62,0xb5,0x7f,0x18,0x6c,0xce] + vcvttsd2usis ecx, xmm22, {sae} + +// CHECK: vcvttsd2usis r9, xmm22 +// CHECK: encoding: [0x62,0x35,0xff,0x08,0x6c,0xce] + vcvttsd2usis r9, xmm22 + +// CHECK: vcvttsd2usis r9, xmm22, {sae} +// CHECK: encoding: [0x62,0x35,0xff,0x18,0x6c,0xce] + vcvttsd2usis r9, xmm22, {sae} + +// CHECK: vcvttsd2usis ecx, qword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0xb5,0x7f,0x08,0x6c,0x8c,0xf5,0x00,0x00,0x00,0x10] + vcvttsd2usis ecx, qword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttsd2usis ecx, qword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0xd5,0x7f,0x08,0x6c,0x8c,0x80,0x23,0x01,0x00,0x00] + vcvttsd2usis ecx, qword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttsd2usis ecx, qword ptr [rip] +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0x0d,0x00,0x00,0x00,0x00] + vcvttsd2usis ecx, qword ptr [rip] + +// CHECK: vcvttsd2usis ecx, qword ptr [2*rbp - 256] +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0x0c,0x6d,0x00,0xff,0xff,0xff] + vcvttsd2usis ecx, qword ptr [2*rbp - 256] + +// CHECK: vcvttsd2usis ecx, qword ptr [rcx + 1016] +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0x49,0x7f] + vcvttsd2usis ecx, qword ptr [rcx + 1016] + +// CHECK: vcvttsd2usis ecx, qword ptr [rdx - 1024] +// CHECK: encoding: [0x62,0xf5,0x7f,0x08,0x6c,0x4a,0x80] + vcvttsd2usis ecx, qword ptr [rdx - 1024] + +// CHECK: vcvttsd2usis r9, qword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x35,0xff,0x08,0x6c,0x8c,0xf5,0x00,0x00,0x00,0x10] + vcvttsd2usis r9, qword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttsd2usis r9, qword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0x55,0xff,0x08,0x6c,0x8c,0x80,0x23,0x01,0x00,0x00] + vcvttsd2usis r9, qword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttsd2usis r9, qword ptr [rip] +// CHECK: encoding: [0x62,0x75,0xff,0x08,0x6c,0x0d,0x00,0x00,0x00,0x00] + vcvttsd2usis r9, qword ptr [rip] + +// CHECK: vcvttsd2usis r9, qword ptr [2*rbp - 256] +// CHECK: encoding: [0x62,0x75,0xff,0x08,0x6c,0x0c,0x6d,0x00,0xff,0xff,0xff] + vcvttsd2usis r9, qword ptr [2*rbp - 256] + +// CHECK: vcvttsd2usis r9, qword ptr [rcx + 1016] +// CHECK: encoding: [0x62,0x75,0xff,0x08,0x6c,0x49,0x7f] + vcvttsd2usis r9, qword ptr [rcx + 1016] + +// CHECK: vcvttsd2usis r9, qword ptr [rdx - 1024] +// CHECK: encoding: [0x62,0x75,0xff,0x08,0x6c,0x4a,0x80] + vcvttsd2usis r9, qword ptr [rdx - 1024] + +// CHECK: vcvttss2sis ecx, xmm22 +// CHECK: encoding: [0x62,0xb5,0x7e,0x08,0x6d,0xce] + vcvttss2sis ecx, xmm22 + +// CHECK: vcvttss2sis ecx, xmm22, {sae} +// CHECK: encoding: [0x62,0xb5,0x7e,0x18,0x6d,0xce] + vcvttss2sis ecx, xmm22, {sae} + +// CHECK: vcvttss2sis r9, 
xmm22 +// CHECK: encoding: [0x62,0x35,0xfe,0x08,0x6d,0xce] + vcvttss2sis r9, xmm22 + +// CHECK: vcvttss2sis r9, xmm22, {sae} +// CHECK: encoding: [0x62,0x35,0xfe,0x18,0x6d,0xce] + vcvttss2sis r9, xmm22, {sae} + +// CHECK: vcvttss2sis ecx, dword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0xb5,0x7e,0x08,0x6d,0x8c,0xf5,0x00,0x00,0x00,0x10] + vcvttss2sis ecx, dword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttss2sis ecx, dword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0xd5,0x7e,0x08,0x6d,0x8c,0x80,0x23,0x01,0x00,0x00] + vcvttss2sis ecx, dword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttss2sis ecx, dword ptr [rip] +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x0d,0x00,0x00,0x00,0x00] + vcvttss2sis ecx, dword ptr [rip] + +// CHECK: vcvttss2sis ecx, dword ptr [2*rbp - 128] +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x0c,0x6d,0x80,0xff,0xff,0xff] + vcvttss2sis ecx, dword ptr [2*rbp - 128] + +// CHECK: vcvttss2sis ecx, dword ptr [rcx + 508] +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x49,0x7f] + vcvttss2sis ecx, dword ptr [rcx + 508] + +// CHECK: vcvttss2sis ecx, dword ptr [rdx - 512] +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6d,0x4a,0x80] + vcvttss2sis ecx, dword ptr [rdx - 512] + +// CHECK: vcvttss2sis r9, dword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x35,0xfe,0x08,0x6d,0x8c,0xf5,0x00,0x00,0x00,0x10] + vcvttss2sis r9, dword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttss2sis r9, dword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0x55,0xfe,0x08,0x6d,0x8c,0x80,0x23,0x01,0x00,0x00] + vcvttss2sis r9, dword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttss2sis r9, dword ptr [rip] +// CHECK: encoding: [0x62,0x75,0xfe,0x08,0x6d,0x0d,0x00,0x00,0x00,0x00] + vcvttss2sis r9, dword ptr [rip] + +// CHECK: vcvttss2sis r9, dword ptr [2*rbp - 128] +// CHECK: encoding: [0x62,0x75,0xfe,0x08,0x6d,0x0c,0x6d,0x80,0xff,0xff,0xff] + vcvttss2sis r9, dword ptr [2*rbp - 128] + +// CHECK: vcvttss2sis r9, dword ptr [rcx + 508] +// CHECK: encoding: [0x62,0x75,0xfe,0x08,0x6d,0x49,0x7f] + vcvttss2sis r9, dword ptr [rcx + 508] + +// CHECK: vcvttss2sis r9, dword ptr [rdx - 512] +// CHECK: encoding: [0x62,0x75,0xfe,0x08,0x6d,0x4a,0x80] + vcvttss2sis r9, dword ptr [rdx - 512] + +// CHECK: vcvttss2usis ecx, xmm22 +// CHECK: encoding: [0x62,0xb5,0x7e,0x08,0x6c,0xce] + vcvttss2usis ecx, xmm22 + +// CHECK: vcvttss2usis ecx, xmm22, {sae} +// CHECK: encoding: [0x62,0xb5,0x7e,0x18,0x6c,0xce] + vcvttss2usis ecx, xmm22, {sae} + +// CHECK: vcvttss2usis r9, xmm22 +// CHECK: encoding: [0x62,0x35,0xfe,0x08,0x6c,0xce] + vcvttss2usis r9, xmm22 + +// CHECK: vcvttss2usis r9, xmm22, {sae} +// CHECK: encoding: [0x62,0x35,0xfe,0x18,0x6c,0xce] + vcvttss2usis r9, xmm22, {sae} + +// CHECK: vcvttss2usis ecx, dword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0xb5,0x7e,0x08,0x6c,0x8c,0xf5,0x00,0x00,0x00,0x10] + vcvttss2usis ecx, dword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttss2usis ecx, dword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0xd5,0x7e,0x08,0x6c,0x8c,0x80,0x23,0x01,0x00,0x00] + vcvttss2usis ecx, dword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttss2usis ecx, dword ptr [rip] +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0x0d,0x00,0x00,0x00,0x00] + vcvttss2usis ecx, dword ptr [rip] + +// CHECK: vcvttss2usis ecx, dword ptr [2*rbp - 128] +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0x0c,0x6d,0x80,0xff,0xff,0xff] + vcvttss2usis ecx, dword ptr [2*rbp - 128] + +// CHECK: vcvttss2usis ecx, dword ptr [rcx + 508] +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0x49,0x7f] + 
vcvttss2usis ecx, dword ptr [rcx + 508] + +// CHECK: vcvttss2usis ecx, dword ptr [rdx - 512] +// CHECK: encoding: [0x62,0xf5,0x7e,0x08,0x6c,0x4a,0x80] + vcvttss2usis ecx, dword ptr [rdx - 512] + +// CHECK: vcvttss2usis r9, dword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x35,0xfe,0x08,0x6c,0x8c,0xf5,0x00,0x00,0x00,0x10] + vcvttss2usis r9, dword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttss2usis r9, dword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0x55,0xfe,0x08,0x6c,0x8c,0x80,0x23,0x01,0x00,0x00] + vcvttss2usis r9, dword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttss2usis r9, dword ptr [rip] +// CHECK: encoding: [0x62,0x75,0xfe,0x08,0x6c,0x0d,0x00,0x00,0x00,0x00] + vcvttss2usis r9, dword ptr [rip] + +// CHECK: vcvttss2usis r9, dword ptr [2*rbp - 128] +// CHECK: encoding: [0x62,0x75,0xfe,0x08,0x6c,0x0c,0x6d,0x80,0xff,0xff,0xff] + vcvttss2usis r9, dword ptr [2*rbp - 128] + +// CHECK: vcvttss2usis r9, dword ptr [rcx + 508] +// CHECK: encoding: [0x62,0x75,0xfe,0x08,0x6c,0x49,0x7f] + vcvttss2usis r9, dword ptr [rcx + 508] + +// CHECK: vcvttss2usis r9, dword ptr [rdx - 512] +// CHECK: encoding: [0x62,0x75,0xfe,0x08,0x6c,0x4a,0x80] + vcvttss2usis r9, dword ptr [rdx - 512] + +// CHECK: vcvttpd2dqs xmm22, xmm23 +// CHECK: encoding: [0x62,0xa5,0xfc,0x08,0x6d,0xf7] + vcvttpd2dqs xmm22, xmm23 + +// CHECK: vcvttpd2dqs xmm22 {k7}, xmm23 +// CHECK: encoding: [0x62,0xa5,0xfc,0x0f,0x6d,0xf7] + vcvttpd2dqs xmm22 {k7}, xmm23 + +// CHECK: vcvttpd2dqs xmm22 {k7} {z}, xmm23 +// CHECK: encoding: [0x62,0xa5,0xfc,0x8f,0x6d,0xf7] + vcvttpd2dqs xmm22 {k7} {z}, xmm23 + +// CHECK: vcvttpd2dqs xmm22, ymm23 +// CHECK: encoding: [0x62,0xa5,0xfc,0x28,0x6d,0xf7] + vcvttpd2dqs xmm22, ymm23 + +// CHECK: vcvttpd2dqs xmm22, ymm23, {sae} +// CHECK: encoding: [0x62,0xa5,0xf8,0x18,0x6d,0xf7] + vcvttpd2dqs xmm22, ymm23, {sae} + +// CHECK: vcvttpd2dqs xmm22 {k7}, ymm23 +// CHECK: encoding: [0x62,0xa5,0xfc,0x2f,0x6d,0xf7] + vcvttpd2dqs xmm22 {k7}, ymm23 + +// CHECK: vcvttpd2dqs xmm22 {k7} {z}, ymm23, {sae} +// CHECK: encoding: [0x62,0xa5,0xf8,0x9f,0x6d,0xf7] + vcvttpd2dqs xmm22 {k7} {z}, ymm23, {sae} + +// CHECK: vcvttpd2dqs ymm22, zmm23 +// CHECK: encoding: [0x62,0xa5,0xfc,0x48,0x6d,0xf7] + vcvttpd2dqs ymm22, zmm23 + +// CHECK: vcvttpd2dqs ymm22, zmm23, {sae} +// CHECK: encoding: [0x62,0xa5,0xfc,0x18,0x6d,0xf7] + vcvttpd2dqs ymm22, zmm23, {sae} + +// CHECK: vcvttpd2dqs ymm22 {k7}, zmm23 +// CHECK: encoding: [0x62,0xa5,0xfc,0x4f,0x6d,0xf7] + vcvttpd2dqs ymm22 {k7}, zmm23 + +// CHECK: vcvttpd2dqs ymm22 {k7} {z}, zmm23, {sae} +// CHECK: encoding: [0x62,0xa5,0xfc,0x9f,0x6d,0xf7] + vcvttpd2dqs ymm22 {k7} {z}, zmm23, {sae} + +// CHECK: vcvttpd2dqs xmm22, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0xa5,0xfc,0x08,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttpd2dqs xmm22, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttpd2dqs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0xc5,0xfc,0x0f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttpd2dqs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttpd2dqs xmm22, qword ptr [rip]{1to2} +// CHECK: encoding: [0x62,0xe5,0xfc,0x18,0x6d,0x35,0x00,0x00,0x00,0x00] + vcvttpd2dqs xmm22, qword ptr [rip]{1to2} + +// CHECK: vcvttpd2dqs xmm22, xmmword ptr [2*rbp - 512] +// CHECK: encoding: [0x62,0xe5,0xfc,0x08,0x6d,0x34,0x6d,0x00,0xfe,0xff,0xff] + vcvttpd2dqs xmm22, xmmword ptr [2*rbp - 512] + +// CHECK: vcvttpd2dqs xmm22 {k7} {z}, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0xe5,0xfc,0x8f,0x6d,0x71,0x7f] + vcvttpd2dqs xmm22 
{k7} {z}, xmmword ptr [rcx + 2032] + +// CHECK: vcvttpd2dqs xmm22 {k7} {z}, qword ptr [rdx - 1024]{1to2} +// CHECK: encoding: [0x62,0xe5,0xfc,0x9f,0x6d,0x72,0x80] + vcvttpd2dqs xmm22 {k7} {z}, qword ptr [rdx - 1024]{1to2} + +// CHECK: vcvttpd2dqs xmm22, qword ptr [rip]{1to4} +// CHECK: encoding: [0x62,0xe5,0xfc,0x38,0x6d,0x35,0x00,0x00,0x00,0x00] + vcvttpd2dqs xmm22, qword ptr [rip]{1to4} + +// CHECK: vcvttpd2dqs xmm22, ymmword ptr [2*rbp - 1024] +// CHECK: encoding: [0x62,0xe5,0xfc,0x28,0x6d,0x34,0x6d,0x00,0xfc,0xff,0xff] + vcvttpd2dqs xmm22, ymmword ptr [2*rbp - 1024] + +// CHECK: vcvttpd2dqs xmm22 {k7} {z}, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0xe5,0xfc,0xaf,0x6d,0x71,0x7f] + vcvttpd2dqs xmm22 {k7} {z}, ymmword ptr [rcx + 4064] + +// CHECK: vcvttpd2dqs xmm22 {k7} {z}, qword ptr [rdx - 1024]{1to4} +// CHECK: encoding: [0x62,0xe5,0xfc,0xbf,0x6d,0x72,0x80] + vcvttpd2dqs xmm22 {k7} {z}, qword ptr [rdx - 1024]{1to4} + +// CHECK: vcvttpd2dqs ymm22, zmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0xa5,0xfc,0x48,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttpd2dqs ymm22, zmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttpd2dqs ymm22 {k7}, zmmword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0xc5,0xfc,0x4f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttpd2dqs ymm22 {k7}, zmmword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttpd2dqs ymm22, qword ptr [rip]{1to8} +// CHECK: encoding: [0x62,0xe5,0xfc,0x58,0x6d,0x35,0x00,0x00,0x00,0x00] + vcvttpd2dqs ymm22, qword ptr [rip]{1to8} + +// CHECK: vcvttpd2dqs ymm22, zmmword ptr [2*rbp - 2048] +// CHECK: encoding: [0x62,0xe5,0xfc,0x48,0x6d,0x34,0x6d,0x00,0xf8,0xff,0xff] + vcvttpd2dqs ymm22, zmmword ptr [2*rbp - 2048] + +// CHECK: vcvttpd2dqs ymm22 {k7} {z}, zmmword ptr [rcx + 8128] +// CHECK: encoding: [0x62,0xe5,0xfc,0xcf,0x6d,0x71,0x7f] + vcvttpd2dqs ymm22 {k7} {z}, zmmword ptr [rcx + 8128] + +// CHECK: vcvttpd2dqs ymm22 {k7} {z}, qword ptr [rdx - 1024]{1to8} +// CHECK: encoding: [0x62,0xe5,0xfc,0xdf,0x6d,0x72,0x80] + vcvttpd2dqs ymm22 {k7} {z}, qword ptr [rdx - 1024]{1to8} + +// CHECK: vcvttpd2qqs xmm22, xmm23 +// CHECK: encoding: [0x62,0xa5,0xfd,0x08,0x6d,0xf7] + vcvttpd2qqs xmm22, xmm23 + +// CHECK: vcvttpd2qqs xmm22 {k7}, xmm23 +// CHECK: encoding: [0x62,0xa5,0xfd,0x0f,0x6d,0xf7] + vcvttpd2qqs xmm22 {k7}, xmm23 + +// CHECK: vcvttpd2qqs xmm22 {k7} {z}, xmm23 +// CHECK: encoding: [0x62,0xa5,0xfd,0x8f,0x6d,0xf7] + vcvttpd2qqs xmm22 {k7} {z}, xmm23 + +// CHECK: vcvttpd2qqs ymm22, ymm23 +// CHECK: encoding: [0x62,0xa5,0xfd,0x28,0x6d,0xf7] + vcvttpd2qqs ymm22, ymm23 + +// CHECK: vcvttpd2qqs ymm22, ymm23, {sae} +// CHECK: encoding: [0x62,0xa5,0xf9,0x18,0x6d,0xf7] + vcvttpd2qqs ymm22, ymm23, {sae} + +// CHECK: vcvttpd2qqs ymm22 {k7}, ymm23 +// CHECK: encoding: [0x62,0xa5,0xfd,0x2f,0x6d,0xf7] + vcvttpd2qqs ymm22 {k7}, ymm23 + +// CHECK: vcvttpd2qqs ymm22 {k7} {z}, ymm23, {sae} +// CHECK: encoding: [0x62,0xa5,0xf9,0x9f,0x6d,0xf7] + vcvttpd2qqs ymm22 {k7} {z}, ymm23, {sae} + +// CHECK: vcvttpd2qqs zmm22, zmm23 +// CHECK: encoding: [0x62,0xa5,0xfd,0x48,0x6d,0xf7] + vcvttpd2qqs zmm22, zmm23 + +// CHECK: vcvttpd2qqs zmm22, zmm23, {sae} +// CHECK: encoding: [0x62,0xa5,0xfd,0x18,0x6d,0xf7] + vcvttpd2qqs zmm22, zmm23, {sae} + +// CHECK: vcvttpd2qqs zmm22 {k7}, zmm23 +// CHECK: encoding: [0x62,0xa5,0xfd,0x4f,0x6d,0xf7] + vcvttpd2qqs zmm22 {k7}, zmm23 + +// CHECK: vcvttpd2qqs zmm22 {k7} {z}, zmm23, {sae} +// CHECK: encoding: [0x62,0xa5,0xfd,0x9f,0x6d,0xf7] + vcvttpd2qqs zmm22 {k7} {z}, zmm23, {sae} + +// CHECK: vcvttpd2qqs xmm22, xmmword 
ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0xa5,0xfd,0x08,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttpd2qqs xmm22, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttpd2qqs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0xc5,0xfd,0x0f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttpd2qqs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttpd2qqs xmm22, qword ptr [rip]{1to2} +// CHECK: encoding: [0x62,0xe5,0xfd,0x18,0x6d,0x35,0x00,0x00,0x00,0x00] + vcvttpd2qqs xmm22, qword ptr [rip]{1to2} + +// CHECK: vcvttpd2qqs xmm22, xmmword ptr [2*rbp - 512] +// CHECK: encoding: [0x62,0xe5,0xfd,0x08,0x6d,0x34,0x6d,0x00,0xfe,0xff,0xff] + vcvttpd2qqs xmm22, xmmword ptr [2*rbp - 512] + +// CHECK: vcvttpd2qqs xmm22 {k7} {z}, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0xe5,0xfd,0x8f,0x6d,0x71,0x7f] + vcvttpd2qqs xmm22 {k7} {z}, xmmword ptr [rcx + 2032] + +// CHECK: vcvttpd2qqs xmm22 {k7} {z}, qword ptr [rdx - 1024]{1to2} +// CHECK: encoding: [0x62,0xe5,0xfd,0x9f,0x6d,0x72,0x80] + vcvttpd2qqs xmm22 {k7} {z}, qword ptr [rdx - 1024]{1to2} + +// CHECK: vcvttpd2qqs ymm22, ymmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0xa5,0xfd,0x28,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttpd2qqs ymm22, ymmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttpd2qqs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0xc5,0xfd,0x2f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttpd2qqs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttpd2qqs ymm22, qword ptr [rip]{1to4} +// CHECK: encoding: [0x62,0xe5,0xfd,0x38,0x6d,0x35,0x00,0x00,0x00,0x00] + vcvttpd2qqs ymm22, qword ptr [rip]{1to4} + +// CHECK: vcvttpd2qqs ymm22, ymmword ptr [2*rbp - 1024] +// CHECK: encoding: [0x62,0xe5,0xfd,0x28,0x6d,0x34,0x6d,0x00,0xfc,0xff,0xff] + vcvttpd2qqs ymm22, ymmword ptr [2*rbp - 1024] + +// CHECK: vcvttpd2qqs ymm22 {k7} {z}, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0xe5,0xfd,0xaf,0x6d,0x71,0x7f] + vcvttpd2qqs ymm22 {k7} {z}, ymmword ptr [rcx + 4064] + +// CHECK: vcvttpd2qqs ymm22 {k7} {z}, qword ptr [rdx - 1024]{1to4} +// CHECK: encoding: [0x62,0xe5,0xfd,0xbf,0x6d,0x72,0x80] + vcvttpd2qqs ymm22 {k7} {z}, qword ptr [rdx - 1024]{1to4} + +// CHECK: vcvttpd2qqs zmm22, zmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0xa5,0xfd,0x48,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttpd2qqs zmm22, zmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttpd2qqs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0xc5,0xfd,0x4f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttpd2qqs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttpd2qqs zmm22, qword ptr [rip]{1to8} +// CHECK: encoding: [0x62,0xe5,0xfd,0x58,0x6d,0x35,0x00,0x00,0x00,0x00] + vcvttpd2qqs zmm22, qword ptr [rip]{1to8} + +// CHECK: vcvttpd2qqs zmm22, zmmword ptr [2*rbp - 2048] +// CHECK: encoding: [0x62,0xe5,0xfd,0x48,0x6d,0x34,0x6d,0x00,0xf8,0xff,0xff] + vcvttpd2qqs zmm22, zmmword ptr [2*rbp - 2048] + +// CHECK: vcvttpd2qqs zmm22 {k7} {z}, zmmword ptr [rcx + 8128] +// CHECK: encoding: [0x62,0xe5,0xfd,0xcf,0x6d,0x71,0x7f] + vcvttpd2qqs zmm22 {k7} {z}, zmmword ptr [rcx + 8128] + +// CHECK: vcvttpd2qqs zmm22 {k7} {z}, qword ptr [rdx - 1024]{1to8} +// CHECK: encoding: [0x62,0xe5,0xfd,0xdf,0x6d,0x72,0x80] + vcvttpd2qqs zmm22 {k7} {z}, qword ptr [rdx - 1024]{1to8} + +// CHECK: vcvttpd2udqs xmm22, xmm23 +// CHECK: encoding: [0x62,0xa5,0xfc,0x08,0x6c,0xf7] + vcvttpd2udqs xmm22, xmm23 + +// CHECK: vcvttpd2udqs xmm22 {k7}, xmm23 +// CHECK: encoding: 
[0x62,0xa5,0xfc,0x0f,0x6c,0xf7] + vcvttpd2udqs xmm22 {k7}, xmm23 + +// CHECK: vcvttpd2udqs xmm22 {k7} {z}, xmm23 +// CHECK: encoding: [0x62,0xa5,0xfc,0x8f,0x6c,0xf7] + vcvttpd2udqs xmm22 {k7} {z}, xmm23 + +// CHECK: vcvttpd2udqs xmm22, ymm23 +// CHECK: encoding: [0x62,0xa5,0xfc,0x28,0x6c,0xf7] + vcvttpd2udqs xmm22, ymm23 + +// CHECK: vcvttpd2udqs xmm22, ymm23, {sae} +// CHECK: encoding: [0x62,0xa5,0xf8,0x18,0x6c,0xf7] + vcvttpd2udqs xmm22, ymm23, {sae} + +// CHECK: vcvttpd2udqs xmm22 {k7}, ymm23 +// CHECK: encoding: [0x62,0xa5,0xfc,0x2f,0x6c,0xf7] + vcvttpd2udqs xmm22 {k7}, ymm23 + +// CHECK: vcvttpd2udqs xmm22 {k7} {z}, ymm23, {sae} +// CHECK: encoding: [0x62,0xa5,0xf8,0x9f,0x6c,0xf7] + vcvttpd2udqs xmm22 {k7} {z}, ymm23, {sae} + +// CHECK: vcvttpd2udqs ymm22, zmm23 +// CHECK: encoding: [0x62,0xa5,0xfc,0x48,0x6c,0xf7] + vcvttpd2udqs ymm22, zmm23 + +// CHECK: vcvttpd2udqs ymm22, zmm23, {sae} +// CHECK: encoding: [0x62,0xa5,0xfc,0x18,0x6c,0xf7] + vcvttpd2udqs ymm22, zmm23, {sae} + +// CHECK: vcvttpd2udqs ymm22 {k7}, zmm23 +// CHECK: encoding: [0x62,0xa5,0xfc,0x4f,0x6c,0xf7] + vcvttpd2udqs ymm22 {k7}, zmm23 + +// CHECK: vcvttpd2udqs ymm22 {k7} {z}, zmm23, {sae} +// CHECK: encoding: [0x62,0xa5,0xfc,0x9f,0x6c,0xf7] + vcvttpd2udqs ymm22 {k7} {z}, zmm23, {sae} + +// CHECK: vcvttpd2udqs xmm22, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0xa5,0xfc,0x08,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttpd2udqs xmm22, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttpd2udqs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0xc5,0xfc,0x0f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttpd2udqs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttpd2udqs xmm22, qword ptr [rip]{1to2} +// CHECK: encoding: [0x62,0xe5,0xfc,0x18,0x6c,0x35,0x00,0x00,0x00,0x00] + vcvttpd2udqs xmm22, qword ptr [rip]{1to2} + +// CHECK: vcvttpd2udqs xmm22, xmmword ptr [2*rbp - 512] +// CHECK: encoding: [0x62,0xe5,0xfc,0x08,0x6c,0x34,0x6d,0x00,0xfe,0xff,0xff] + vcvttpd2udqs xmm22, xmmword ptr [2*rbp - 512] + +// CHECK: vcvttpd2udqs xmm22 {k7} {z}, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0xe5,0xfc,0x8f,0x6c,0x71,0x7f] + vcvttpd2udqs xmm22 {k7} {z}, xmmword ptr [rcx + 2032] + +// CHECK: vcvttpd2udqs xmm22 {k7} {z}, qword ptr [rdx - 1024]{1to2} +// CHECK: encoding: [0x62,0xe5,0xfc,0x9f,0x6c,0x72,0x80] + vcvttpd2udqs xmm22 {k7} {z}, qword ptr [rdx - 1024]{1to2} + +// CHECK: vcvttpd2udqs xmm22, qword ptr [rip]{1to4} +// CHECK: encoding: [0x62,0xe5,0xfc,0x38,0x6c,0x35,0x00,0x00,0x00,0x00] + vcvttpd2udqs xmm22, qword ptr [rip]{1to4} + +// CHECK: vcvttpd2udqs xmm22, ymmword ptr [2*rbp - 1024] +// CHECK: encoding: [0x62,0xe5,0xfc,0x28,0x6c,0x34,0x6d,0x00,0xfc,0xff,0xff] + vcvttpd2udqs xmm22, ymmword ptr [2*rbp - 1024] + +// CHECK: vcvttpd2udqs xmm22 {k7} {z}, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0xe5,0xfc,0xaf,0x6c,0x71,0x7f] + vcvttpd2udqs xmm22 {k7} {z}, ymmword ptr [rcx + 4064] + +// CHECK: vcvttpd2udqs xmm22 {k7} {z}, qword ptr [rdx - 1024]{1to4} +// CHECK: encoding: [0x62,0xe5,0xfc,0xbf,0x6c,0x72,0x80] + vcvttpd2udqs xmm22 {k7} {z}, qword ptr [rdx - 1024]{1to4} + +// CHECK: vcvttpd2udqs ymm22, zmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0xa5,0xfc,0x48,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttpd2udqs ymm22, zmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttpd2udqs ymm22 {k7}, zmmword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0xc5,0xfc,0x4f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttpd2udqs ymm22 {k7}, zmmword ptr [r8 + 4*rax + 
291] + +// CHECK: vcvttpd2udqs ymm22, qword ptr [rip]{1to8} +// CHECK: encoding: [0x62,0xe5,0xfc,0x58,0x6c,0x35,0x00,0x00,0x00,0x00] + vcvttpd2udqs ymm22, qword ptr [rip]{1to8} + +// CHECK: vcvttpd2udqs ymm22, zmmword ptr [2*rbp - 2048] +// CHECK: encoding: [0x62,0xe5,0xfc,0x48,0x6c,0x34,0x6d,0x00,0xf8,0xff,0xff] + vcvttpd2udqs ymm22, zmmword ptr [2*rbp - 2048] + +// CHECK: vcvttpd2udqs ymm22 {k7} {z}, zmmword ptr [rcx + 8128] +// CHECK: encoding: [0x62,0xe5,0xfc,0xcf,0x6c,0x71,0x7f] + vcvttpd2udqs ymm22 {k7} {z}, zmmword ptr [rcx + 8128] + +// CHECK: vcvttpd2udqs ymm22 {k7} {z}, qword ptr [rdx - 1024]{1to8} +// CHECK: encoding: [0x62,0xe5,0xfc,0xdf,0x6c,0x72,0x80] + vcvttpd2udqs ymm22 {k7} {z}, qword ptr [rdx - 1024]{1to8} + +// CHECK: vcvttpd2uqqs xmm22, xmm23 +// CHECK: encoding: [0x62,0xa5,0xfd,0x08,0x6c,0xf7] + vcvttpd2uqqs xmm22, xmm23 + +// CHECK: vcvttpd2uqqs xmm22 {k7}, xmm23 +// CHECK: encoding: [0x62,0xa5,0xfd,0x0f,0x6c,0xf7] + vcvttpd2uqqs xmm22 {k7}, xmm23 + +// CHECK: vcvttpd2uqqs xmm22 {k7} {z}, xmm23 +// CHECK: encoding: [0x62,0xa5,0xfd,0x8f,0x6c,0xf7] + vcvttpd2uqqs xmm22 {k7} {z}, xmm23 + +// CHECK: vcvttpd2uqqs ymm22, ymm23 +// CHECK: encoding: [0x62,0xa5,0xfd,0x28,0x6c,0xf7] + vcvttpd2uqqs ymm22, ymm23 + +// CHECK: vcvttpd2uqqs ymm22, ymm23, {sae} +// CHECK: encoding: [0x62,0xa5,0xf9,0x18,0x6c,0xf7] + vcvttpd2uqqs ymm22, ymm23, {sae} + +// CHECK: vcvttpd2uqqs ymm22 {k7}, ymm23 +// CHECK: encoding: [0x62,0xa5,0xfd,0x2f,0x6c,0xf7] + vcvttpd2uqqs ymm22 {k7}, ymm23 + +// CHECK: vcvttpd2uqqs ymm22 {k7} {z}, ymm23, {sae} +// CHECK: encoding: [0x62,0xa5,0xf9,0x9f,0x6c,0xf7] + vcvttpd2uqqs ymm22 {k7} {z}, ymm23, {sae} + +// CHECK: vcvttpd2uqqs zmm22, zmm23 +// CHECK: encoding: [0x62,0xa5,0xfd,0x48,0x6c,0xf7] + vcvttpd2uqqs zmm22, zmm23 + +// CHECK: vcvttpd2uqqs zmm22, zmm23, {sae} +// CHECK: encoding: [0x62,0xa5,0xfd,0x18,0x6c,0xf7] + vcvttpd2uqqs zmm22, zmm23, {sae} + +// CHECK: vcvttpd2uqqs zmm22 {k7}, zmm23 +// CHECK: encoding: [0x62,0xa5,0xfd,0x4f,0x6c,0xf7] + vcvttpd2uqqs zmm22 {k7}, zmm23 + +// CHECK: vcvttpd2uqqs zmm22 {k7} {z}, zmm23, {sae} +// CHECK: encoding: [0x62,0xa5,0xfd,0x9f,0x6c,0xf7] + vcvttpd2uqqs zmm22 {k7} {z}, zmm23, {sae} + +// CHECK: vcvttpd2uqqs xmm22, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0xa5,0xfd,0x08,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttpd2uqqs xmm22, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttpd2uqqs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0xc5,0xfd,0x0f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttpd2uqqs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttpd2uqqs xmm22, qword ptr [rip]{1to2} +// CHECK: encoding: [0x62,0xe5,0xfd,0x18,0x6c,0x35,0x00,0x00,0x00,0x00] + vcvttpd2uqqs xmm22, qword ptr [rip]{1to2} + +// CHECK: vcvttpd2uqqs xmm22, xmmword ptr [2*rbp - 512] +// CHECK: encoding: [0x62,0xe5,0xfd,0x08,0x6c,0x34,0x6d,0x00,0xfe,0xff,0xff] + vcvttpd2uqqs xmm22, xmmword ptr [2*rbp - 512] + +// CHECK: vcvttpd2uqqs xmm22 {k7} {z}, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0xe5,0xfd,0x8f,0x6c,0x71,0x7f] + vcvttpd2uqqs xmm22 {k7} {z}, xmmword ptr [rcx + 2032] + +// CHECK: vcvttpd2uqqs xmm22 {k7} {z}, qword ptr [rdx - 1024]{1to2} +// CHECK: encoding: [0x62,0xe5,0xfd,0x9f,0x6c,0x72,0x80] + vcvttpd2uqqs xmm22 {k7} {z}, qword ptr [rdx - 1024]{1to2} + +// CHECK: vcvttpd2uqqs ymm22, ymmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0xa5,0xfd,0x28,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttpd2uqqs ymm22, ymmword ptr [rbp + 8*r14 + 268435456] + +// 
CHECK: vcvttpd2uqqs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0xc5,0xfd,0x2f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttpd2uqqs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttpd2uqqs ymm22, qword ptr [rip]{1to4} +// CHECK: encoding: [0x62,0xe5,0xfd,0x38,0x6c,0x35,0x00,0x00,0x00,0x00] + vcvttpd2uqqs ymm22, qword ptr [rip]{1to4} + +// CHECK: vcvttpd2uqqs ymm22, ymmword ptr [2*rbp - 1024] +// CHECK: encoding: [0x62,0xe5,0xfd,0x28,0x6c,0x34,0x6d,0x00,0xfc,0xff,0xff] + vcvttpd2uqqs ymm22, ymmword ptr [2*rbp - 1024] + +// CHECK: vcvttpd2uqqs ymm22 {k7} {z}, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0xe5,0xfd,0xaf,0x6c,0x71,0x7f] + vcvttpd2uqqs ymm22 {k7} {z}, ymmword ptr [rcx + 4064] + +// CHECK: vcvttpd2uqqs ymm22 {k7} {z}, qword ptr [rdx - 1024]{1to4} +// CHECK: encoding: [0x62,0xe5,0xfd,0xbf,0x6c,0x72,0x80] + vcvttpd2uqqs ymm22 {k7} {z}, qword ptr [rdx - 1024]{1to4} + +// CHECK: vcvttpd2uqqs zmm22, zmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0xa5,0xfd,0x48,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttpd2uqqs zmm22, zmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttpd2uqqs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0xc5,0xfd,0x4f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttpd2uqqs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttpd2uqqs zmm22, qword ptr [rip]{1to8} +// CHECK: encoding: [0x62,0xe5,0xfd,0x58,0x6c,0x35,0x00,0x00,0x00,0x00] + vcvttpd2uqqs zmm22, qword ptr [rip]{1to8} + +// CHECK: vcvttpd2uqqs zmm22, zmmword ptr [2*rbp - 2048] +// CHECK: encoding: [0x62,0xe5,0xfd,0x48,0x6c,0x34,0x6d,0x00,0xf8,0xff,0xff] + vcvttpd2uqqs zmm22, zmmword ptr [2*rbp - 2048] + +// CHECK: vcvttpd2uqqs zmm22 {k7} {z}, zmmword ptr [rcx + 8128] +// CHECK: encoding: [0x62,0xe5,0xfd,0xcf,0x6c,0x71,0x7f] + vcvttpd2uqqs zmm22 {k7} {z}, zmmword ptr [rcx + 8128] + +// CHECK: vcvttpd2uqqs zmm22 {k7} {z}, qword ptr [rdx - 1024]{1to8} +// CHECK: encoding: [0x62,0xe5,0xfd,0xdf,0x6c,0x72,0x80] + vcvttpd2uqqs zmm22 {k7} {z}, qword ptr [rdx - 1024]{1to8} + +// CHECK: vcvttps2dqs xmm22, xmm23 +// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x6d,0xf7] + vcvttps2dqs xmm22, xmm23 + +// CHECK: vcvttps2dqs xmm22 {k7}, xmm23 +// CHECK: encoding: [0x62,0xa5,0x7c,0x0f,0x6d,0xf7] + vcvttps2dqs xmm22 {k7}, xmm23 + +// CHECK: vcvttps2dqs xmm22 {k7} {z}, xmm23 +// CHECK: encoding: [0x62,0xa5,0x7c,0x8f,0x6d,0xf7] + vcvttps2dqs xmm22 {k7} {z}, xmm23 + +// CHECK: vcvttps2dqs ymm22, ymm23 +// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x6d,0xf7] + vcvttps2dqs ymm22, ymm23 + +// CHECK: vcvttps2dqs ymm22, ymm23, {sae} +// CHECK: encoding: [0x62,0xa5,0x78,0x18,0x6d,0xf7] + vcvttps2dqs ymm22, ymm23, {sae} + +// CHECK: vcvttps2dqs ymm22 {k7}, ymm23 +// CHECK: encoding: [0x62,0xa5,0x7c,0x2f,0x6d,0xf7] + vcvttps2dqs ymm22 {k7}, ymm23 + +// CHECK: vcvttps2dqs ymm22 {k7} {z}, ymm23, {sae} +// CHECK: encoding: [0x62,0xa5,0x78,0x9f,0x6d,0xf7] + vcvttps2dqs ymm22 {k7} {z}, ymm23, {sae} + +// CHECK: vcvttps2dqs zmm22, zmm23 +// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x6d,0xf7] + vcvttps2dqs zmm22, zmm23 + +// CHECK: vcvttps2dqs zmm22, zmm23, {sae} +// CHECK: encoding: [0x62,0xa5,0x7c,0x18,0x6d,0xf7] + vcvttps2dqs zmm22, zmm23, {sae} + +// CHECK: vcvttps2dqs zmm22 {k7}, zmm23 +// CHECK: encoding: [0x62,0xa5,0x7c,0x4f,0x6d,0xf7] + vcvttps2dqs zmm22 {k7}, zmm23 + +// CHECK: vcvttps2dqs zmm22 {k7} {z}, zmm23, {sae} +// CHECK: encoding: [0x62,0xa5,0x7c,0x9f,0x6d,0xf7] + vcvttps2dqs zmm22 {k7} {z}, zmm23, {sae} + +// CHECK: vcvttps2dqs xmm22, xmmword ptr 
[rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttps2dqs xmm22, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttps2dqs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0xc5,0x7c,0x0f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttps2dqs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttps2dqs xmm22, dword ptr [rip]{1to4} +// CHECK: encoding: [0x62,0xe5,0x7c,0x18,0x6d,0x35,0x00,0x00,0x00,0x00] + vcvttps2dqs xmm22, dword ptr [rip]{1to4} + +// CHECK: vcvttps2dqs xmm22, xmmword ptr [2*rbp - 512] +// CHECK: encoding: [0x62,0xe5,0x7c,0x08,0x6d,0x34,0x6d,0x00,0xfe,0xff,0xff] + vcvttps2dqs xmm22, xmmword ptr [2*rbp - 512] + +// CHECK: vcvttps2dqs xmm22 {k7} {z}, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0xe5,0x7c,0x8f,0x6d,0x71,0x7f] + vcvttps2dqs xmm22 {k7} {z}, xmmword ptr [rcx + 2032] + +// CHECK: vcvttps2dqs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to4} +// CHECK: encoding: [0x62,0xe5,0x7c,0x9f,0x6d,0x72,0x80] + vcvttps2dqs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to4} + +// CHECK: vcvttps2dqs ymm22, ymmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttps2dqs ymm22, ymmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttps2dqs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0xc5,0x7c,0x2f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttps2dqs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttps2dqs ymm22, dword ptr [rip]{1to8} +// CHECK: encoding: [0x62,0xe5,0x7c,0x38,0x6d,0x35,0x00,0x00,0x00,0x00] + vcvttps2dqs ymm22, dword ptr [rip]{1to8} + +// CHECK: vcvttps2dqs ymm22, ymmword ptr [2*rbp - 1024] +// CHECK: encoding: [0x62,0xe5,0x7c,0x28,0x6d,0x34,0x6d,0x00,0xfc,0xff,0xff] + vcvttps2dqs ymm22, ymmword ptr [2*rbp - 1024] + +// CHECK: vcvttps2dqs ymm22 {k7} {z}, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0xe5,0x7c,0xaf,0x6d,0x71,0x7f] + vcvttps2dqs ymm22 {k7} {z}, ymmword ptr [rcx + 4064] + +// CHECK: vcvttps2dqs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to8} +// CHECK: encoding: [0x62,0xe5,0x7c,0xbf,0x6d,0x72,0x80] + vcvttps2dqs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to8} + +// CHECK: vcvttps2dqs zmm22, zmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttps2dqs zmm22, zmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttps2dqs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0xc5,0x7c,0x4f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttps2dqs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttps2dqs zmm22, dword ptr [rip]{1to16} +// CHECK: encoding: [0x62,0xe5,0x7c,0x58,0x6d,0x35,0x00,0x00,0x00,0x00] + vcvttps2dqs zmm22, dword ptr [rip]{1to16} + +// CHECK: vcvttps2dqs zmm22, zmmword ptr [2*rbp - 2048] +// CHECK: encoding: [0x62,0xe5,0x7c,0x48,0x6d,0x34,0x6d,0x00,0xf8,0xff,0xff] + vcvttps2dqs zmm22, zmmword ptr [2*rbp - 2048] + +// CHECK: vcvttps2dqs zmm22 {k7} {z}, zmmword ptr [rcx + 8128] +// CHECK: encoding: [0x62,0xe5,0x7c,0xcf,0x6d,0x71,0x7f] + vcvttps2dqs zmm22 {k7} {z}, zmmword ptr [rcx + 8128] + +// CHECK: vcvttps2dqs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to16} +// CHECK: encoding: [0x62,0xe5,0x7c,0xdf,0x6d,0x72,0x80] + vcvttps2dqs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to16} + +// CHECK: vcvttps2qqs xmm22, xmm23 +// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x6d,0xf7] + vcvttps2qqs xmm22, xmm23 + +// CHECK: vcvttps2qqs xmm22 {k7}, xmm23 +// CHECK: encoding: 
[0x62,0xa5,0x7d,0x0f,0x6d,0xf7] + vcvttps2qqs xmm22 {k7}, xmm23 + +// CHECK: vcvttps2qqs xmm22 {k7} {z}, xmm23 +// CHECK: encoding: [0x62,0xa5,0x7d,0x8f,0x6d,0xf7] + vcvttps2qqs xmm22 {k7} {z}, xmm23 + +// CHECK: vcvttps2qqs ymm22, xmm23 +// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x6d,0xf7] + vcvttps2qqs ymm22, xmm23 + +// CHECK: vcvttps2qqs ymm22, xmm23, {sae} +// CHECK: encoding: [0x62,0xa5,0x79,0x18,0x6d,0xf7] + vcvttps2qqs ymm22, xmm23, {sae} + +// CHECK: vcvttps2qqs ymm22 {k7}, xmm23 +// CHECK: encoding: [0x62,0xa5,0x7d,0x2f,0x6d,0xf7] + vcvttps2qqs ymm22 {k7}, xmm23 + +// CHECK: vcvttps2qqs ymm22 {k7} {z}, xmm23, {sae} +// CHECK: encoding: [0x62,0xa5,0x79,0x9f,0x6d,0xf7] + vcvttps2qqs ymm22 {k7} {z}, xmm23, {sae} + +// CHECK: vcvttps2qqs zmm22, ymm23 +// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x6d,0xf7] + vcvttps2qqs zmm22, ymm23 + +// CHECK: vcvttps2qqs zmm22, ymm23, {sae} +// CHECK: encoding: [0x62,0xa5,0x7d,0x18,0x6d,0xf7] + vcvttps2qqs zmm22, ymm23, {sae} + +// CHECK: vcvttps2qqs zmm22 {k7}, ymm23 +// CHECK: encoding: [0x62,0xa5,0x7d,0x4f,0x6d,0xf7] + vcvttps2qqs zmm22 {k7}, ymm23 + +// CHECK: vcvttps2qqs zmm22 {k7} {z}, ymm23, {sae} +// CHECK: encoding: [0x62,0xa5,0x7d,0x9f,0x6d,0xf7] + vcvttps2qqs zmm22 {k7} {z}, ymm23, {sae} + +// CHECK: vcvttps2qqs xmm22, qword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttps2qqs xmm22, qword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttps2qqs xmm22 {k7}, qword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0xc5,0x7d,0x0f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttps2qqs xmm22 {k7}, qword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttps2qqs xmm22, dword ptr [rip]{1to2} +// CHECK: encoding: [0x62,0xe5,0x7d,0x18,0x6d,0x35,0x00,0x00,0x00,0x00] + vcvttps2qqs xmm22, dword ptr [rip]{1to2} + +// CHECK: vcvttps2qqs xmm22, qword ptr [2*rbp - 256] +// CHECK: encoding: [0x62,0xe5,0x7d,0x08,0x6d,0x34,0x6d,0x00,0xff,0xff,0xff] + vcvttps2qqs xmm22, qword ptr [2*rbp - 256] + +// CHECK: vcvttps2qqs xmm22 {k7} {z}, qword ptr [rcx + 1016] +// CHECK: encoding: [0x62,0xe5,0x7d,0x8f,0x6d,0x71,0x7f] + vcvttps2qqs xmm22 {k7} {z}, qword ptr [rcx + 1016] + +// CHECK: vcvttps2qqs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to2} +// CHECK: encoding: [0x62,0xe5,0x7d,0x9f,0x6d,0x72,0x80] + vcvttps2qqs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to2} + +// CHECK: vcvttps2qqs ymm22, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttps2qqs ymm22, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttps2qqs ymm22 {k7}, xmmword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0xc5,0x7d,0x2f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttps2qqs ymm22 {k7}, xmmword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttps2qqs ymm22, dword ptr [rip]{1to4} +// CHECK: encoding: [0x62,0xe5,0x7d,0x38,0x6d,0x35,0x00,0x00,0x00,0x00] + vcvttps2qqs ymm22, dword ptr [rip]{1to4} + +// CHECK: vcvttps2qqs ymm22, xmmword ptr [2*rbp - 512] +// CHECK: encoding: [0x62,0xe5,0x7d,0x28,0x6d,0x34,0x6d,0x00,0xfe,0xff,0xff] + vcvttps2qqs ymm22, xmmword ptr [2*rbp - 512] + +// CHECK: vcvttps2qqs ymm22 {k7} {z}, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0xe5,0x7d,0xaf,0x6d,0x71,0x7f] + vcvttps2qqs ymm22 {k7} {z}, xmmword ptr [rcx + 2032] + +// CHECK: vcvttps2qqs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to4} +// CHECK: encoding: [0x62,0xe5,0x7d,0xbf,0x6d,0x72,0x80] + vcvttps2qqs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to4} + +// CHECK: vcvttps2qqs zmm22, ymmword ptr [rbp + 8*r14 + 
268435456] +// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x6d,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttps2qqs zmm22, ymmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttps2qqs zmm22 {k7}, ymmword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0xc5,0x7d,0x4f,0x6d,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttps2qqs zmm22 {k7}, ymmword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttps2qqs zmm22, dword ptr [rip]{1to8} +// CHECK: encoding: [0x62,0xe5,0x7d,0x58,0x6d,0x35,0x00,0x00,0x00,0x00] + vcvttps2qqs zmm22, dword ptr [rip]{1to8} + +// CHECK: vcvttps2qqs zmm22, ymmword ptr [2*rbp - 1024] +// CHECK: encoding: [0x62,0xe5,0x7d,0x48,0x6d,0x34,0x6d,0x00,0xfc,0xff,0xff] + vcvttps2qqs zmm22, ymmword ptr [2*rbp - 1024] + +// CHECK: vcvttps2qqs zmm22 {k7} {z}, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0xe5,0x7d,0xcf,0x6d,0x71,0x7f] + vcvttps2qqs zmm22 {k7} {z}, ymmword ptr [rcx + 4064] + +// CHECK: vcvttps2qqs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to8} +// CHECK: encoding: [0x62,0xe5,0x7d,0xdf,0x6d,0x72,0x80] + vcvttps2qqs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to8} + +// CHECK: vcvttps2udqs xmm22, xmm23 +// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x6c,0xf7] + vcvttps2udqs xmm22, xmm23 + +// CHECK: vcvttps2udqs xmm22 {k7}, xmm23 +// CHECK: encoding: [0x62,0xa5,0x7c,0x0f,0x6c,0xf7] + vcvttps2udqs xmm22 {k7}, xmm23 + +// CHECK: vcvttps2udqs xmm22 {k7} {z}, xmm23 +// CHECK: encoding: [0x62,0xa5,0x7c,0x8f,0x6c,0xf7] + vcvttps2udqs xmm22 {k7} {z}, xmm23 + +// CHECK: vcvttps2udqs ymm22, ymm23 +// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x6c,0xf7] + vcvttps2udqs ymm22, ymm23 + +// CHECK: vcvttps2udqs ymm22, ymm23, {sae} +// CHECK: encoding: [0x62,0xa5,0x78,0x18,0x6c,0xf7] + vcvttps2udqs ymm22, ymm23, {sae} + +// CHECK: vcvttps2udqs ymm22 {k7}, ymm23 +// CHECK: encoding: [0x62,0xa5,0x7c,0x2f,0x6c,0xf7] + vcvttps2udqs ymm22 {k7}, ymm23 + +// CHECK: vcvttps2udqs ymm22 {k7} {z}, ymm23, {sae} +// CHECK: encoding: [0x62,0xa5,0x78,0x9f,0x6c,0xf7] + vcvttps2udqs ymm22 {k7} {z}, ymm23, {sae} + +// CHECK: vcvttps2udqs zmm22, zmm23 +// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x6c,0xf7] + vcvttps2udqs zmm22, zmm23 + +// CHECK: vcvttps2udqs zmm22, zmm23, {sae} +// CHECK: encoding: [0x62,0xa5,0x7c,0x18,0x6c,0xf7] + vcvttps2udqs zmm22, zmm23, {sae} + +// CHECK: vcvttps2udqs zmm22 {k7}, zmm23 +// CHECK: encoding: [0x62,0xa5,0x7c,0x4f,0x6c,0xf7] + vcvttps2udqs zmm22 {k7}, zmm23 + +// CHECK: vcvttps2udqs zmm22 {k7} {z}, zmm23, {sae} +// CHECK: encoding: [0x62,0xa5,0x7c,0x9f,0x6c,0xf7] + vcvttps2udqs zmm22 {k7} {z}, zmm23, {sae} + +// CHECK: vcvttps2udqs xmm22, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0xa5,0x7c,0x08,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10] + vcvttps2udqs xmm22, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vcvttps2udqs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291] +// CHECK: encoding: [0x62,0xc5,0x7c,0x0f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00] + vcvttps2udqs xmm22 {k7}, xmmword ptr [r8 + 4*rax + 291] + +// CHECK: vcvttps2udqs xmm22, dword ptr [rip]{1to4} +// CHECK: encoding: [0x62,0xe5,0x7c,0x18,0x6c,0x35,0x00,0x00,0x00,0x00] + vcvttps2udqs xmm22, dword ptr [rip]{1to4} + +// CHECK: vcvttps2udqs xmm22, xmmword ptr [2*rbp - 512] +// CHECK: encoding: [0x62,0xe5,0x7c,0x08,0x6c,0x34,0x6d,0x00,0xfe,0xff,0xff] + vcvttps2udqs xmm22, xmmword ptr [2*rbp - 512] + +// CHECK: vcvttps2udqs xmm22 {k7} {z}, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0xe5,0x7c,0x8f,0x6c,0x71,0x7f] + vcvttps2udqs xmm22 {k7} {z}, xmmword ptr [rcx + 2032] + +// CHECK: vcvttps2udqs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to4} 
+// CHECK: encoding: [0x62,0xe5,0x7c,0x9f,0x6c,0x72,0x80]
+ vcvttps2udqs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to4}
+
+// CHECK: vcvttps2udqs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7c,0x28,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2udqs ymm22, ymmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttps2udqs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7c,0x2f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2udqs ymm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttps2udqs ymm22, dword ptr [rip]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x38,0x6c,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2udqs ymm22, dword ptr [rip]{1to8}
+
+// CHECK: vcvttps2udqs ymm22, ymmword ptr [2*rbp - 1024]
+// CHECK: encoding: [0x62,0xe5,0x7c,0x28,0x6c,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttps2udqs ymm22, ymmword ptr [2*rbp - 1024]
+
+// CHECK: vcvttps2udqs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+// CHECK: encoding: [0x62,0xe5,0x7c,0xaf,0x6c,0x71,0x7f]
+ vcvttps2udqs ymm22 {k7} {z}, ymmword ptr [rcx + 4064]
+
+// CHECK: vcvttps2udqs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xbf,0x6c,0x72,0x80]
+ vcvttps2udqs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to8}
+
+// CHECK: vcvttps2udqs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7c,0x48,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2udqs zmm22, zmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttps2udqs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7c,0x4f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2udqs zmm22 {k7}, zmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttps2udqs zmm22, dword ptr [rip]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7c,0x58,0x6c,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2udqs zmm22, dword ptr [rip]{1to16}
+
+// CHECK: vcvttps2udqs zmm22, zmmword ptr [2*rbp - 2048]
+// CHECK: encoding: [0x62,0xe5,0x7c,0x48,0x6c,0x34,0x6d,0x00,0xf8,0xff,0xff]
+ vcvttps2udqs zmm22, zmmword ptr [2*rbp - 2048]
+
+// CHECK: vcvttps2udqs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+// CHECK: encoding: [0x62,0xe5,0x7c,0xcf,0x6c,0x71,0x7f]
+ vcvttps2udqs zmm22 {k7} {z}, zmmword ptr [rcx + 8128]
+
+// CHECK: vcvttps2udqs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to16}
+// CHECK: encoding: [0x62,0xe5,0x7c,0xdf,0x6c,0x72,0x80]
+ vcvttps2udqs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to16}
+
+// CHECK: vcvttps2uqqs xmm22, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x6c,0xf7]
+ vcvttps2uqqs xmm22, xmm23
+
+// CHECK: vcvttps2uqqs xmm22 {k7}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x0f,0x6c,0xf7]
+ vcvttps2uqqs xmm22 {k7}, xmm23
+
+// CHECK: vcvttps2uqqs xmm22 {k7} {z}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x8f,0x6c,0xf7]
+ vcvttps2uqqs xmm22 {k7} {z}, xmm23
+
+// CHECK: vcvttps2uqqs ymm22, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x6c,0xf7]
+ vcvttps2uqqs ymm22, xmm23
+
+// CHECK: vcvttps2uqqs ymm22, xmm23, {sae}
+// CHECK: encoding: [0x62,0xa5,0x79,0x18,0x6c,0xf7]
+ vcvttps2uqqs ymm22, xmm23, {sae}
+
+// CHECK: vcvttps2uqqs ymm22 {k7}, xmm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x2f,0x6c,0xf7]
+ vcvttps2uqqs ymm22 {k7}, xmm23
+
+// CHECK: vcvttps2uqqs ymm22 {k7} {z}, xmm23, {sae}
+// CHECK: encoding: [0x62,0xa5,0x79,0x9f,0x6c,0xf7]
+ vcvttps2uqqs ymm22 {k7} {z}, xmm23, {sae}
+
+// CHECK: vcvttps2uqqs zmm22, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x6c,0xf7]
+ vcvttps2uqqs zmm22, ymm23
+
+// CHECK: vcvttps2uqqs zmm22, ymm23, {sae}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x18,0x6c,0xf7]
+ vcvttps2uqqs zmm22, ymm23, {sae}
+
+// CHECK: vcvttps2uqqs zmm22 {k7}, ymm23
+// CHECK: encoding: [0x62,0xa5,0x7d,0x4f,0x6c,0xf7]
+ vcvttps2uqqs zmm22 {k7}, ymm23
+
+// CHECK: vcvttps2uqqs zmm22 {k7} {z}, ymm23, {sae}
+// CHECK: encoding: [0x62,0xa5,0x7d,0x9f,0x6c,0xf7]
+ vcvttps2uqqs zmm22 {k7} {z}, ymm23, {sae}
+
+// CHECK: vcvttps2uqqs xmm22, qword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7d,0x08,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2uqqs xmm22, qword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttps2uqqs xmm22 {k7}, qword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7d,0x0f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2uqqs xmm22 {k7}, qword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttps2uqqs xmm22, dword ptr [rip]{1to2}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x18,0x6c,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2uqqs xmm22, dword ptr [rip]{1to2}
+
+// CHECK: vcvttps2uqqs xmm22, qword ptr [2*rbp - 256]
+// CHECK: encoding: [0x62,0xe5,0x7d,0x08,0x6c,0x34,0x6d,0x00,0xff,0xff,0xff]
+ vcvttps2uqqs xmm22, qword ptr [2*rbp - 256]
+
+// CHECK: vcvttps2uqqs xmm22 {k7} {z}, qword ptr [rcx + 1016]
+// CHECK: encoding: [0x62,0xe5,0x7d,0x8f,0x6c,0x71,0x7f]
+ vcvttps2uqqs xmm22 {k7} {z}, qword ptr [rcx + 1016]
+
+// CHECK: vcvttps2uqqs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to2}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x9f,0x6c,0x72,0x80]
+ vcvttps2uqqs xmm22 {k7} {z}, dword ptr [rdx - 512]{1to2}
+
+// CHECK: vcvttps2uqqs ymm22, xmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7d,0x28,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2uqqs ymm22, xmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttps2uqqs ymm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7d,0x2f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2uqqs ymm22 {k7}, xmmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttps2uqqs ymm22, dword ptr [rip]{1to4}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x38,0x6c,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2uqqs ymm22, dword ptr [rip]{1to4}
+
+// CHECK: vcvttps2uqqs ymm22, xmmword ptr [2*rbp - 512]
+// CHECK: encoding: [0x62,0xe5,0x7d,0x28,0x6c,0x34,0x6d,0x00,0xfe,0xff,0xff]
+ vcvttps2uqqs ymm22, xmmword ptr [2*rbp - 512]
+
+// CHECK: vcvttps2uqqs ymm22 {k7} {z}, xmmword ptr [rcx + 2032]
+// CHECK: encoding: [0x62,0xe5,0x7d,0xaf,0x6c,0x71,0x7f]
+ vcvttps2uqqs ymm22 {k7} {z}, xmmword ptr [rcx + 2032]
+
+// CHECK: vcvttps2uqqs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to4}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xbf,0x6c,0x72,0x80]
+ vcvttps2uqqs ymm22 {k7} {z}, dword ptr [rdx - 512]{1to4}
+
+// CHECK: vcvttps2uqqs zmm22, ymmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0xa5,0x7d,0x48,0x6c,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vcvttps2uqqs zmm22, ymmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vcvttps2uqqs zmm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+// CHECK: encoding: [0x62,0xc5,0x7d,0x4f,0x6c,0xb4,0x80,0x23,0x01,0x00,0x00]
+ vcvttps2uqqs zmm22 {k7}, ymmword ptr [r8 + 4*rax + 291]
+
+// CHECK: vcvttps2uqqs zmm22, dword ptr [rip]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7d,0x58,0x6c,0x35,0x00,0x00,0x00,0x00]
+ vcvttps2uqqs zmm22, dword ptr [rip]{1to8}
+
+// CHECK: vcvttps2uqqs zmm22, ymmword ptr [2*rbp - 1024]
+// CHECK: encoding: [0x62,0xe5,0x7d,0x48,0x6c,0x34,0x6d,0x00,0xfc,0xff,0xff]
+ vcvttps2uqqs zmm22, ymmword ptr [2*rbp - 1024]
+
+// CHECK: vcvttps2uqqs zmm22 {k7} {z}, ymmword ptr [rcx + 4064]
+// CHECK: encoding: [0x62,0xe5,0x7d,0xcf,0x6c,0x71,0x7f]
+ vcvttps2uqqs zmm22 {k7} {z}, ymmword ptr [rcx + 4064]
+
+// CHECK: vcvttps2uqqs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to8}
+// CHECK: encoding: [0x62,0xe5,0x7d,0xdf,0x6c,0x72,0x80]
+ vcvttps2uqqs zmm22 {k7} {z}, dword ptr [rdx - 512]{1to8}
+
diff --git a/llvm/test/TableGen/x86-fold-tables.inc b/llvm/test/TableGen/x86-fold-tables.inc
index 286fb49..be1b59e 100644
--- a/llvm/test/TableGen/x86-fold-tables.inc
+++ b/llvm/test/TableGen/x86-fold-tables.inc
@@ -1341,17 +1341,29 @@ static const X86FoldTableEntry Table1[] = {
 {X86::VCVTTNEBF162IUBSZ128rr, X86::VCVTTNEBF162IUBSZ128rm, 0},
 {X86::VCVTTNEBF162IUBSZ256rr, X86::VCVTTNEBF162IUBSZ256rm, 0},
 {X86::VCVTTNEBF162IUBSZrr, X86::VCVTTNEBF162IUBSZrm, 0},
+ {X86::VCVTTPD2DQSZ128rr, X86::VCVTTPD2DQSZ128rm, 0},
+ {X86::VCVTTPD2DQSZ256rr, X86::VCVTTPD2DQSZ256rm, 0},
+ {X86::VCVTTPD2DQSZrr, X86::VCVTTPD2DQSZrm, 0},
 {X86::VCVTTPD2DQYrr, X86::VCVTTPD2DQYrm, 0},
 {X86::VCVTTPD2DQZ128rr, X86::VCVTTPD2DQZ128rm, 0},
 {X86::VCVTTPD2DQZ256rr, X86::VCVTTPD2DQZ256rm, 0},
 {X86::VCVTTPD2DQZrr, X86::VCVTTPD2DQZrm, 0},
 {X86::VCVTTPD2DQrr, X86::VCVTTPD2DQrm, 0},
+ {X86::VCVTTPD2QQSZ128rr, X86::VCVTTPD2QQSZ128rm, 0},
+ {X86::VCVTTPD2QQSZ256rr, X86::VCVTTPD2QQSZ256rm, 0},
+ {X86::VCVTTPD2QQSZrr, X86::VCVTTPD2QQSZrm, 0},
 {X86::VCVTTPD2QQZ128rr, X86::VCVTTPD2QQZ128rm, 0},
 {X86::VCVTTPD2QQZ256rr, X86::VCVTTPD2QQZ256rm, 0},
 {X86::VCVTTPD2QQZrr, X86::VCVTTPD2QQZrm, 0},
+ {X86::VCVTTPD2UDQSZ128rr, X86::VCVTTPD2UDQSZ128rm, 0},
+ {X86::VCVTTPD2UDQSZ256rr, X86::VCVTTPD2UDQSZ256rm, 0},
+ {X86::VCVTTPD2UDQSZrr, X86::VCVTTPD2UDQSZrm, 0},
 {X86::VCVTTPD2UDQZ128rr, X86::VCVTTPD2UDQZ128rm, 0},
 {X86::VCVTTPD2UDQZ256rr, X86::VCVTTPD2UDQZ256rm, 0},
 {X86::VCVTTPD2UDQZrr, X86::VCVTTPD2UDQZrm, 0},
+ {X86::VCVTTPD2UQQSZ128rr, X86::VCVTTPD2UQQSZ128rm, 0},
+ {X86::VCVTTPD2UQQSZ256rr, X86::VCVTTPD2UQQSZ256rm, 0},
+ {X86::VCVTTPD2UQQSZrr, X86::VCVTTPD2UQQSZrm, 0},
 {X86::VCVTTPD2UQQZ128rr, X86::VCVTTPD2UQQZ128rm, 0},
 {X86::VCVTTPD2UQQZ256rr, X86::VCVTTPD2UQQZ256rm, 0},
 {X86::VCVTTPD2UQQZrr, X86::VCVTTPD2UQQZrm, 0},
@@ -1379,6 +1391,9 @@ static const X86FoldTableEntry Table1[] = {
 {X86::VCVTTPH2WZ128rr, X86::VCVTTPH2WZ128rm, 0},
 {X86::VCVTTPH2WZ256rr, X86::VCVTTPH2WZ256rm, 0},
 {X86::VCVTTPH2WZrr, X86::VCVTTPH2WZrm, 0},
+ {X86::VCVTTPS2DQSZ128rr, X86::VCVTTPS2DQSZ128rm, 0},
+ {X86::VCVTTPS2DQSZ256rr, X86::VCVTTPS2DQSZ256rm, 0},
+ {X86::VCVTTPS2DQSZrr, X86::VCVTTPS2DQSZrm, 0},
 {X86::VCVTTPS2DQYrr, X86::VCVTTPS2DQYrm, 0},
 {X86::VCVTTPS2DQZ128rr, X86::VCVTTPS2DQZ128rm, 0},
 {X86::VCVTTPS2DQZ256rr, X86::VCVTTPS2DQZ256rm, 0},
@@ -1390,25 +1405,42 @@ static const X86FoldTableEntry Table1[] = {
 {X86::VCVTTPS2IUBSZ128rr, X86::VCVTTPS2IUBSZ128rm, 0},
 {X86::VCVTTPS2IUBSZ256rr, X86::VCVTTPS2IUBSZ256rm, 0},
 {X86::VCVTTPS2IUBSZrr, X86::VCVTTPS2IUBSZrm, 0},
+ {X86::VCVTTPS2QQSZ128rr, X86::VCVTTPS2QQSZ128rm, TB_NO_REVERSE},
+ {X86::VCVTTPS2QQSZ256rr, X86::VCVTTPS2QQSZ256rm, 0},
+ {X86::VCVTTPS2QQSZrr, X86::VCVTTPS2QQSZrm, 0},
 {X86::VCVTTPS2QQZ128rr, X86::VCVTTPS2QQZ128rm, TB_NO_REVERSE},
 {X86::VCVTTPS2QQZ256rr, X86::VCVTTPS2QQZ256rm, 0},
 {X86::VCVTTPS2QQZrr, X86::VCVTTPS2QQZrm, 0},
+ {X86::VCVTTPS2UDQSZ128rr, X86::VCVTTPS2UDQSZ128rm, 0},
+ {X86::VCVTTPS2UDQSZ256rr, X86::VCVTTPS2UDQSZ256rm, 0},
+ {X86::VCVTTPS2UDQSZrr, X86::VCVTTPS2UDQSZrm, 0},
 {X86::VCVTTPS2UDQZ128rr, X86::VCVTTPS2UDQZ128rm, 0},
 {X86::VCVTTPS2UDQZ256rr, X86::VCVTTPS2UDQZ256rm, 0},
 {X86::VCVTTPS2UDQZrr, X86::VCVTTPS2UDQZrm, 0},
+ {X86::VCVTTPS2UQQSZ128rr, X86::VCVTTPS2UQQSZ128rm, TB_NO_REVERSE},
+ {X86::VCVTTPS2UQQSZ256rr, X86::VCVTTPS2UQQSZ256rm, 0},
+ {X86::VCVTTPS2UQQSZrr, X86::VCVTTPS2UQQSZrm, 0},
{X86::VCVTTPS2UQQZ128rr, X86::VCVTTPS2UQQZ128rm, TB_NO_REVERSE}, {X86::VCVTTPS2UQQZ256rr, X86::VCVTTPS2UQQZ256rm, 0}, {X86::VCVTTPS2UQQZrr, X86::VCVTTPS2UQQZrm, 0}, + {X86::VCVTTSD2SI64Srr, X86::VCVTTSD2SI64Srm, 0}, + {X86::VCVTTSD2SI64Srr_Int, X86::VCVTTSD2SI64Srm_Int, TB_NO_REVERSE}, {X86::VCVTTSD2SI64Zrr, X86::VCVTTSD2SI64Zrm, 0}, {X86::VCVTTSD2SI64Zrr_Int, X86::VCVTTSD2SI64Zrm_Int, TB_NO_REVERSE}, {X86::VCVTTSD2SI64rr, X86::VCVTTSD2SI64rm, 0}, {X86::VCVTTSD2SI64rr_Int, X86::VCVTTSD2SI64rm_Int, TB_NO_REVERSE}, + {X86::VCVTTSD2SISrr, X86::VCVTTSD2SISrm, 0}, + {X86::VCVTTSD2SISrr_Int, X86::VCVTTSD2SISrm_Int, TB_NO_REVERSE}, {X86::VCVTTSD2SIZrr, X86::VCVTTSD2SIZrm, 0}, {X86::VCVTTSD2SIZrr_Int, X86::VCVTTSD2SIZrm_Int, TB_NO_REVERSE}, {X86::VCVTTSD2SIrr, X86::VCVTTSD2SIrm, 0}, {X86::VCVTTSD2SIrr_Int, X86::VCVTTSD2SIrm_Int, TB_NO_REVERSE}, + {X86::VCVTTSD2USI64Srr, X86::VCVTTSD2USI64Srm, 0}, + {X86::VCVTTSD2USI64Srr_Int, X86::VCVTTSD2USI64Srm_Int, TB_NO_REVERSE}, {X86::VCVTTSD2USI64Zrr, X86::VCVTTSD2USI64Zrm, 0}, {X86::VCVTTSD2USI64Zrr_Int, X86::VCVTTSD2USI64Zrm_Int, TB_NO_REVERSE}, + {X86::VCVTTSD2USISrr, X86::VCVTTSD2USISrm, 0}, + {X86::VCVTTSD2USISrr_Int, X86::VCVTTSD2USISrm_Int, TB_NO_REVERSE}, {X86::VCVTTSD2USIZrr, X86::VCVTTSD2USIZrm, 0}, {X86::VCVTTSD2USIZrr_Int, X86::VCVTTSD2USIZrm_Int, TB_NO_REVERSE}, {X86::VCVTTSH2SI64Zrr, X86::VCVTTSH2SI64Zrm, 0}, @@ -1419,16 +1451,24 @@ static const X86FoldTableEntry Table1[] = { {X86::VCVTTSH2USI64Zrr_Int, X86::VCVTTSH2USI64Zrm_Int, TB_NO_REVERSE}, {X86::VCVTTSH2USIZrr, X86::VCVTTSH2USIZrm, 0}, {X86::VCVTTSH2USIZrr_Int, X86::VCVTTSH2USIZrm_Int, TB_NO_REVERSE}, + {X86::VCVTTSS2SI64Srr, X86::VCVTTSS2SI64Srm, 0}, + {X86::VCVTTSS2SI64Srr_Int, X86::VCVTTSS2SI64Srm_Int, TB_NO_REVERSE}, {X86::VCVTTSS2SI64Zrr, X86::VCVTTSS2SI64Zrm, 0}, {X86::VCVTTSS2SI64Zrr_Int, X86::VCVTTSS2SI64Zrm_Int, TB_NO_REVERSE}, {X86::VCVTTSS2SI64rr, X86::VCVTTSS2SI64rm, 0}, {X86::VCVTTSS2SI64rr_Int, X86::VCVTTSS2SI64rm_Int, TB_NO_REVERSE}, + {X86::VCVTTSS2SISrr, X86::VCVTTSS2SISrm, 0}, + {X86::VCVTTSS2SISrr_Int, X86::VCVTTSS2SISrm_Int, TB_NO_REVERSE}, {X86::VCVTTSS2SIZrr, X86::VCVTTSS2SIZrm, 0}, {X86::VCVTTSS2SIZrr_Int, X86::VCVTTSS2SIZrm_Int, TB_NO_REVERSE}, {X86::VCVTTSS2SIrr, X86::VCVTTSS2SIrm, 0}, {X86::VCVTTSS2SIrr_Int, X86::VCVTTSS2SIrm_Int, TB_NO_REVERSE}, + {X86::VCVTTSS2USI64Srr, X86::VCVTTSS2USI64Srm, 0}, + {X86::VCVTTSS2USI64Srr_Int, X86::VCVTTSS2USI64Srm_Int, TB_NO_REVERSE}, {X86::VCVTTSS2USI64Zrr, X86::VCVTTSS2USI64Zrm, 0}, {X86::VCVTTSS2USI64Zrr_Int, X86::VCVTTSS2USI64Zrm_Int, TB_NO_REVERSE}, + {X86::VCVTTSS2USISrr, X86::VCVTTSS2USISrm, 0}, + {X86::VCVTTSS2USISrr_Int, X86::VCVTTSS2USISrm_Int, TB_NO_REVERSE}, {X86::VCVTTSS2USIZrr, X86::VCVTTSS2USIZrm, 0}, {X86::VCVTTSS2USIZrr_Int, X86::VCVTTSS2USIZrm_Int, TB_NO_REVERSE}, {X86::VCVTUDQ2PDZ128rr, X86::VCVTUDQ2PDZ128rm, TB_NO_REVERSE}, @@ -2676,15 +2716,27 @@ static const X86FoldTableEntry Table2[] = { {X86::VCVTTNEBF162IUBSZ128rrkz, X86::VCVTTNEBF162IUBSZ128rmkz, 0}, {X86::VCVTTNEBF162IUBSZ256rrkz, X86::VCVTTNEBF162IUBSZ256rmkz, 0}, {X86::VCVTTNEBF162IUBSZrrkz, X86::VCVTTNEBF162IUBSZrmkz, 0}, + {X86::VCVTTPD2DQSZ128rrkz, X86::VCVTTPD2DQSZ128rmkz, 0}, + {X86::VCVTTPD2DQSZ256rrkz, X86::VCVTTPD2DQSZ256rmkz, 0}, + {X86::VCVTTPD2DQSZrrkz, X86::VCVTTPD2DQSZrmkz, 0}, {X86::VCVTTPD2DQZ128rrkz, X86::VCVTTPD2DQZ128rmkz, 0}, {X86::VCVTTPD2DQZ256rrkz, X86::VCVTTPD2DQZ256rmkz, 0}, {X86::VCVTTPD2DQZrrkz, X86::VCVTTPD2DQZrmkz, 0}, + {X86::VCVTTPD2QQSZ128rrkz, X86::VCVTTPD2QQSZ128rmkz, 0}, + {X86::VCVTTPD2QQSZ256rrkz, 
X86::VCVTTPD2QQSZ256rmkz, 0}, + {X86::VCVTTPD2QQSZrrkz, X86::VCVTTPD2QQSZrmkz, 0}, {X86::VCVTTPD2QQZ128rrkz, X86::VCVTTPD2QQZ128rmkz, 0}, {X86::VCVTTPD2QQZ256rrkz, X86::VCVTTPD2QQZ256rmkz, 0}, {X86::VCVTTPD2QQZrrkz, X86::VCVTTPD2QQZrmkz, 0}, + {X86::VCVTTPD2UDQSZ128rrkz, X86::VCVTTPD2UDQSZ128rmkz, 0}, + {X86::VCVTTPD2UDQSZ256rrkz, X86::VCVTTPD2UDQSZ256rmkz, 0}, + {X86::VCVTTPD2UDQSZrrkz, X86::VCVTTPD2UDQSZrmkz, 0}, {X86::VCVTTPD2UDQZ128rrkz, X86::VCVTTPD2UDQZ128rmkz, 0}, {X86::VCVTTPD2UDQZ256rrkz, X86::VCVTTPD2UDQZ256rmkz, 0}, {X86::VCVTTPD2UDQZrrkz, X86::VCVTTPD2UDQZrmkz, 0}, + {X86::VCVTTPD2UQQSZ128rrkz, X86::VCVTTPD2UQQSZ128rmkz, 0}, + {X86::VCVTTPD2UQQSZ256rrkz, X86::VCVTTPD2UQQSZ256rmkz, 0}, + {X86::VCVTTPD2UQQSZrrkz, X86::VCVTTPD2UQQSZrmkz, 0}, {X86::VCVTTPD2UQQZ128rrkz, X86::VCVTTPD2UQQZ128rmkz, 0}, {X86::VCVTTPD2UQQZ256rrkz, X86::VCVTTPD2UQQZ256rmkz, 0}, {X86::VCVTTPD2UQQZrrkz, X86::VCVTTPD2UQQZrmkz, 0}, @@ -2712,6 +2764,9 @@ static const X86FoldTableEntry Table2[] = { {X86::VCVTTPH2WZ128rrkz, X86::VCVTTPH2WZ128rmkz, 0}, {X86::VCVTTPH2WZ256rrkz, X86::VCVTTPH2WZ256rmkz, 0}, {X86::VCVTTPH2WZrrkz, X86::VCVTTPH2WZrmkz, 0}, + {X86::VCVTTPS2DQSZ128rrkz, X86::VCVTTPS2DQSZ128rmkz, 0}, + {X86::VCVTTPS2DQSZ256rrkz, X86::VCVTTPS2DQSZ256rmkz, 0}, + {X86::VCVTTPS2DQSZrrkz, X86::VCVTTPS2DQSZrmkz, 0}, {X86::VCVTTPS2DQZ128rrkz, X86::VCVTTPS2DQZ128rmkz, 0}, {X86::VCVTTPS2DQZ256rrkz, X86::VCVTTPS2DQZ256rmkz, 0}, {X86::VCVTTPS2DQZrrkz, X86::VCVTTPS2DQZrmkz, 0}, @@ -2721,12 +2776,21 @@ static const X86FoldTableEntry Table2[] = { {X86::VCVTTPS2IUBSZ128rrkz, X86::VCVTTPS2IUBSZ128rmkz, 0}, {X86::VCVTTPS2IUBSZ256rrkz, X86::VCVTTPS2IUBSZ256rmkz, 0}, {X86::VCVTTPS2IUBSZrrkz, X86::VCVTTPS2IUBSZrmkz, 0}, + {X86::VCVTTPS2QQSZ128rrkz, X86::VCVTTPS2QQSZ128rmkz, TB_NO_REVERSE}, + {X86::VCVTTPS2QQSZ256rrkz, X86::VCVTTPS2QQSZ256rmkz, 0}, + {X86::VCVTTPS2QQSZrrkz, X86::VCVTTPS2QQSZrmkz, 0}, {X86::VCVTTPS2QQZ128rrkz, X86::VCVTTPS2QQZ128rmkz, TB_NO_REVERSE}, {X86::VCVTTPS2QQZ256rrkz, X86::VCVTTPS2QQZ256rmkz, 0}, {X86::VCVTTPS2QQZrrkz, X86::VCVTTPS2QQZrmkz, 0}, + {X86::VCVTTPS2UDQSZ128rrkz, X86::VCVTTPS2UDQSZ128rmkz, 0}, + {X86::VCVTTPS2UDQSZ256rrkz, X86::VCVTTPS2UDQSZ256rmkz, 0}, + {X86::VCVTTPS2UDQSZrrkz, X86::VCVTTPS2UDQSZrmkz, 0}, {X86::VCVTTPS2UDQZ128rrkz, X86::VCVTTPS2UDQZ128rmkz, 0}, {X86::VCVTTPS2UDQZ256rrkz, X86::VCVTTPS2UDQZ256rmkz, 0}, {X86::VCVTTPS2UDQZrrkz, X86::VCVTTPS2UDQZrmkz, 0}, + {X86::VCVTTPS2UQQSZ128rrkz, X86::VCVTTPS2UQQSZ128rmkz, TB_NO_REVERSE}, + {X86::VCVTTPS2UQQSZ256rrkz, X86::VCVTTPS2UQQSZ256rmkz, 0}, + {X86::VCVTTPS2UQQSZrrkz, X86::VCVTTPS2UQQSZrmkz, 0}, {X86::VCVTTPS2UQQZ128rrkz, X86::VCVTTPS2UQQZ128rmkz, TB_NO_REVERSE}, {X86::VCVTTPS2UQQZ256rrkz, X86::VCVTTPS2UQQZ256rmkz, 0}, {X86::VCVTTPS2UQQZrrkz, X86::VCVTTPS2UQQZrmkz, 0}, @@ -4366,15 +4430,27 @@ static const X86FoldTableEntry Table3[] = { {X86::VCVTTNEBF162IUBSZ128rrk, X86::VCVTTNEBF162IUBSZ128rmk, 0}, {X86::VCVTTNEBF162IUBSZ256rrk, X86::VCVTTNEBF162IUBSZ256rmk, 0}, {X86::VCVTTNEBF162IUBSZrrk, X86::VCVTTNEBF162IUBSZrmk, 0}, + {X86::VCVTTPD2DQSZ128rrk, X86::VCVTTPD2DQSZ128rmk, 0}, + {X86::VCVTTPD2DQSZ256rrk, X86::VCVTTPD2DQSZ256rmk, 0}, + {X86::VCVTTPD2DQSZrrk, X86::VCVTTPD2DQSZrmk, 0}, {X86::VCVTTPD2DQZ128rrk, X86::VCVTTPD2DQZ128rmk, 0}, {X86::VCVTTPD2DQZ256rrk, X86::VCVTTPD2DQZ256rmk, 0}, {X86::VCVTTPD2DQZrrk, X86::VCVTTPD2DQZrmk, 0}, + {X86::VCVTTPD2QQSZ128rrk, X86::VCVTTPD2QQSZ128rmk, 0}, + {X86::VCVTTPD2QQSZ256rrk, X86::VCVTTPD2QQSZ256rmk, 0}, + {X86::VCVTTPD2QQSZrrk, X86::VCVTTPD2QQSZrmk, 0}, {X86::VCVTTPD2QQZ128rrk, 
X86::VCVTTPD2QQZ128rmk, 0}, {X86::VCVTTPD2QQZ256rrk, X86::VCVTTPD2QQZ256rmk, 0}, {X86::VCVTTPD2QQZrrk, X86::VCVTTPD2QQZrmk, 0}, + {X86::VCVTTPD2UDQSZ128rrk, X86::VCVTTPD2UDQSZ128rmk, 0}, + {X86::VCVTTPD2UDQSZ256rrk, X86::VCVTTPD2UDQSZ256rmk, 0}, + {X86::VCVTTPD2UDQSZrrk, X86::VCVTTPD2UDQSZrmk, 0}, {X86::VCVTTPD2UDQZ128rrk, X86::VCVTTPD2UDQZ128rmk, 0}, {X86::VCVTTPD2UDQZ256rrk, X86::VCVTTPD2UDQZ256rmk, 0}, {X86::VCVTTPD2UDQZrrk, X86::VCVTTPD2UDQZrmk, 0}, + {X86::VCVTTPD2UQQSZ128rrk, X86::VCVTTPD2UQQSZ128rmk, 0}, + {X86::VCVTTPD2UQQSZ256rrk, X86::VCVTTPD2UQQSZ256rmk, 0}, + {X86::VCVTTPD2UQQSZrrk, X86::VCVTTPD2UQQSZrmk, 0}, {X86::VCVTTPD2UQQZ128rrk, X86::VCVTTPD2UQQZ128rmk, 0}, {X86::VCVTTPD2UQQZ256rrk, X86::VCVTTPD2UQQZ256rmk, 0}, {X86::VCVTTPD2UQQZrrk, X86::VCVTTPD2UQQZrmk, 0}, @@ -4402,6 +4478,9 @@ static const X86FoldTableEntry Table3[] = { {X86::VCVTTPH2WZ128rrk, X86::VCVTTPH2WZ128rmk, 0}, {X86::VCVTTPH2WZ256rrk, X86::VCVTTPH2WZ256rmk, 0}, {X86::VCVTTPH2WZrrk, X86::VCVTTPH2WZrmk, 0}, + {X86::VCVTTPS2DQSZ128rrk, X86::VCVTTPS2DQSZ128rmk, 0}, + {X86::VCVTTPS2DQSZ256rrk, X86::VCVTTPS2DQSZ256rmk, 0}, + {X86::VCVTTPS2DQSZrrk, X86::VCVTTPS2DQSZrmk, 0}, {X86::VCVTTPS2DQZ128rrk, X86::VCVTTPS2DQZ128rmk, 0}, {X86::VCVTTPS2DQZ256rrk, X86::VCVTTPS2DQZ256rmk, 0}, {X86::VCVTTPS2DQZrrk, X86::VCVTTPS2DQZrmk, 0}, @@ -4411,12 +4490,21 @@ static const X86FoldTableEntry Table3[] = { {X86::VCVTTPS2IUBSZ128rrk, X86::VCVTTPS2IUBSZ128rmk, 0}, {X86::VCVTTPS2IUBSZ256rrk, X86::VCVTTPS2IUBSZ256rmk, 0}, {X86::VCVTTPS2IUBSZrrk, X86::VCVTTPS2IUBSZrmk, 0}, + {X86::VCVTTPS2QQSZ128rrk, X86::VCVTTPS2QQSZ128rmk, TB_NO_REVERSE}, + {X86::VCVTTPS2QQSZ256rrk, X86::VCVTTPS2QQSZ256rmk, 0}, + {X86::VCVTTPS2QQSZrrk, X86::VCVTTPS2QQSZrmk, 0}, {X86::VCVTTPS2QQZ128rrk, X86::VCVTTPS2QQZ128rmk, TB_NO_REVERSE}, {X86::VCVTTPS2QQZ256rrk, X86::VCVTTPS2QQZ256rmk, 0}, {X86::VCVTTPS2QQZrrk, X86::VCVTTPS2QQZrmk, 0}, + {X86::VCVTTPS2UDQSZ128rrk, X86::VCVTTPS2UDQSZ128rmk, 0}, + {X86::VCVTTPS2UDQSZ256rrk, X86::VCVTTPS2UDQSZ256rmk, 0}, + {X86::VCVTTPS2UDQSZrrk, X86::VCVTTPS2UDQSZrmk, 0}, {X86::VCVTTPS2UDQZ128rrk, X86::VCVTTPS2UDQZ128rmk, 0}, {X86::VCVTTPS2UDQZ256rrk, X86::VCVTTPS2UDQZ256rmk, 0}, {X86::VCVTTPS2UDQZrrk, X86::VCVTTPS2UDQZrmk, 0}, + {X86::VCVTTPS2UQQSZ128rrk, X86::VCVTTPS2UQQSZ128rmk, TB_NO_REVERSE}, + {X86::VCVTTPS2UQQSZ256rrk, X86::VCVTTPS2UQQSZ256rmk, 0}, + {X86::VCVTTPS2UQQSZrrk, X86::VCVTTPS2UQQSZrmk, 0}, {X86::VCVTTPS2UQQZ128rrk, X86::VCVTTPS2UQQZ128rmk, TB_NO_REVERSE}, {X86::VCVTTPS2UQQZ256rrk, X86::VCVTTPS2UQQZ256rmk, 0}, {X86::VCVTTPS2UQQZrrk, X86::VCVTTPS2UQQZrmk, 0}, @@ -7432,15 +7520,27 @@ static const X86FoldTableEntry BroadcastTable1[] = { {X86::VCVTTNEBF162IUBSZ128rr, X86::VCVTTNEBF162IUBSZ128rmb, TB_BCAST_SH}, {X86::VCVTTNEBF162IUBSZ256rr, X86::VCVTTNEBF162IUBSZ256rmb, TB_BCAST_SH}, {X86::VCVTTNEBF162IUBSZrr, X86::VCVTTNEBF162IUBSZrmb, TB_BCAST_SH}, + {X86::VCVTTPD2DQSZ128rr, X86::VCVTTPD2DQSZ128rmb, TB_BCAST_SD}, + {X86::VCVTTPD2DQSZ256rr, X86::VCVTTPD2DQSZ256rmb, TB_BCAST_SD}, + {X86::VCVTTPD2DQSZrr, X86::VCVTTPD2DQSZrmb, TB_BCAST_SD}, {X86::VCVTTPD2DQZ128rr, X86::VCVTTPD2DQZ128rmb, TB_BCAST_SD}, {X86::VCVTTPD2DQZ256rr, X86::VCVTTPD2DQZ256rmb, TB_BCAST_SD}, {X86::VCVTTPD2DQZrr, X86::VCVTTPD2DQZrmb, TB_BCAST_SD}, + {X86::VCVTTPD2QQSZ128rr, X86::VCVTTPD2QQSZ128rmb, TB_BCAST_SD}, + {X86::VCVTTPD2QQSZ256rr, X86::VCVTTPD2QQSZ256rmb, TB_BCAST_SD}, + {X86::VCVTTPD2QQSZrr, X86::VCVTTPD2QQSZrmb, TB_BCAST_SD}, {X86::VCVTTPD2QQZ128rr, X86::VCVTTPD2QQZ128rmb, TB_BCAST_SD}, {X86::VCVTTPD2QQZ256rr, X86::VCVTTPD2QQZ256rmb, 
TB_BCAST_SD}, {X86::VCVTTPD2QQZrr, X86::VCVTTPD2QQZrmb, TB_BCAST_SD}, + {X86::VCVTTPD2UDQSZ128rr, X86::VCVTTPD2UDQSZ128rmb, TB_BCAST_SD}, + {X86::VCVTTPD2UDQSZ256rr, X86::VCVTTPD2UDQSZ256rmb, TB_BCAST_SD}, + {X86::VCVTTPD2UDQSZrr, X86::VCVTTPD2UDQSZrmb, TB_BCAST_SD}, {X86::VCVTTPD2UDQZ128rr, X86::VCVTTPD2UDQZ128rmb, TB_BCAST_SD}, {X86::VCVTTPD2UDQZ256rr, X86::VCVTTPD2UDQZ256rmb, TB_BCAST_SD}, {X86::VCVTTPD2UDQZrr, X86::VCVTTPD2UDQZrmb, TB_BCAST_SD}, + {X86::VCVTTPD2UQQSZ128rr, X86::VCVTTPD2UQQSZ128rmb, TB_BCAST_SD}, + {X86::VCVTTPD2UQQSZ256rr, X86::VCVTTPD2UQQSZ256rmb, TB_BCAST_SD}, + {X86::VCVTTPD2UQQSZrr, X86::VCVTTPD2UQQSZrmb, TB_BCAST_SD}, {X86::VCVTTPD2UQQZ128rr, X86::VCVTTPD2UQQZ128rmb, TB_BCAST_SD}, {X86::VCVTTPD2UQQZ256rr, X86::VCVTTPD2UQQZ256rmb, TB_BCAST_SD}, {X86::VCVTTPD2UQQZrr, X86::VCVTTPD2UQQZrmb, TB_BCAST_SD}, @@ -7468,6 +7568,9 @@ static const X86FoldTableEntry BroadcastTable1[] = { {X86::VCVTTPH2WZ128rr, X86::VCVTTPH2WZ128rmb, TB_BCAST_SH}, {X86::VCVTTPH2WZ256rr, X86::VCVTTPH2WZ256rmb, TB_BCAST_SH}, {X86::VCVTTPH2WZrr, X86::VCVTTPH2WZrmb, TB_BCAST_SH}, + {X86::VCVTTPS2DQSZ128rr, X86::VCVTTPS2DQSZ128rmb, TB_BCAST_SS}, + {X86::VCVTTPS2DQSZ256rr, X86::VCVTTPS2DQSZ256rmb, TB_BCAST_SS}, + {X86::VCVTTPS2DQSZrr, X86::VCVTTPS2DQSZrmb, TB_BCAST_SS}, {X86::VCVTTPS2DQZ128rr, X86::VCVTTPS2DQZ128rmb, TB_BCAST_SS}, {X86::VCVTTPS2DQZ256rr, X86::VCVTTPS2DQZ256rmb, TB_BCAST_SS}, {X86::VCVTTPS2DQZrr, X86::VCVTTPS2DQZrmb, TB_BCAST_SS}, @@ -7477,12 +7580,21 @@ static const X86FoldTableEntry BroadcastTable1[] = { {X86::VCVTTPS2IUBSZ128rr, X86::VCVTTPS2IUBSZ128rmb, TB_BCAST_SS}, {X86::VCVTTPS2IUBSZ256rr, X86::VCVTTPS2IUBSZ256rmb, TB_BCAST_SS}, {X86::VCVTTPS2IUBSZrr, X86::VCVTTPS2IUBSZrmb, TB_BCAST_SS}, + {X86::VCVTTPS2QQSZ128rr, X86::VCVTTPS2QQSZ128rmb, TB_BCAST_SS}, + {X86::VCVTTPS2QQSZ256rr, X86::VCVTTPS2QQSZ256rmb, TB_BCAST_SS}, + {X86::VCVTTPS2QQSZrr, X86::VCVTTPS2QQSZrmb, TB_BCAST_SS}, {X86::VCVTTPS2QQZ128rr, X86::VCVTTPS2QQZ128rmb, TB_BCAST_SS}, {X86::VCVTTPS2QQZ256rr, X86::VCVTTPS2QQZ256rmb, TB_BCAST_SS}, {X86::VCVTTPS2QQZrr, X86::VCVTTPS2QQZrmb, TB_BCAST_SS}, + {X86::VCVTTPS2UDQSZ128rr, X86::VCVTTPS2UDQSZ128rmb, TB_BCAST_SS}, + {X86::VCVTTPS2UDQSZ256rr, X86::VCVTTPS2UDQSZ256rmb, TB_BCAST_SS}, + {X86::VCVTTPS2UDQSZrr, X86::VCVTTPS2UDQSZrmb, TB_BCAST_SS}, {X86::VCVTTPS2UDQZ128rr, X86::VCVTTPS2UDQZ128rmb, TB_BCAST_SS}, {X86::VCVTTPS2UDQZ256rr, X86::VCVTTPS2UDQZ256rmb, TB_BCAST_SS}, {X86::VCVTTPS2UDQZrr, X86::VCVTTPS2UDQZrmb, TB_BCAST_SS}, + {X86::VCVTTPS2UQQSZ128rr, X86::VCVTTPS2UQQSZ128rmb, TB_BCAST_SS}, + {X86::VCVTTPS2UQQSZ256rr, X86::VCVTTPS2UQQSZ256rmb, TB_BCAST_SS}, + {X86::VCVTTPS2UQQSZrr, X86::VCVTTPS2UQQSZrmb, TB_BCAST_SS}, {X86::VCVTTPS2UQQZ128rr, X86::VCVTTPS2UQQZ128rmb, TB_BCAST_SS}, {X86::VCVTTPS2UQQZ256rr, X86::VCVTTPS2UQQZ256rmb, TB_BCAST_SS}, {X86::VCVTTPS2UQQZrr, X86::VCVTTPS2UQQZrmb, TB_BCAST_SS}, @@ -7877,15 +7989,27 @@ static const X86FoldTableEntry BroadcastTable2[] = { {X86::VCVTTNEBF162IUBSZ128rrkz, X86::VCVTTNEBF162IUBSZ128rmbkz, TB_BCAST_SH}, {X86::VCVTTNEBF162IUBSZ256rrkz, X86::VCVTTNEBF162IUBSZ256rmbkz, TB_BCAST_SH}, {X86::VCVTTNEBF162IUBSZrrkz, X86::VCVTTNEBF162IUBSZrmbkz, TB_BCAST_SH}, + {X86::VCVTTPD2DQSZ128rrkz, X86::VCVTTPD2DQSZ128rmbkz, TB_BCAST_SD}, + {X86::VCVTTPD2DQSZ256rrkz, X86::VCVTTPD2DQSZ256rmbkz, TB_BCAST_SD}, + {X86::VCVTTPD2DQSZrrkz, X86::VCVTTPD2DQSZrmbkz, TB_BCAST_SD}, {X86::VCVTTPD2DQZ128rrkz, X86::VCVTTPD2DQZ128rmbkz, TB_BCAST_SD}, {X86::VCVTTPD2DQZ256rrkz, X86::VCVTTPD2DQZ256rmbkz, TB_BCAST_SD}, {X86::VCVTTPD2DQZrrkz, X86::VCVTTPD2DQZrmbkz, 
TB_BCAST_SD}, + {X86::VCVTTPD2QQSZ128rrkz, X86::VCVTTPD2QQSZ128rmbkz, TB_BCAST_SD}, + {X86::VCVTTPD2QQSZ256rrkz, X86::VCVTTPD2QQSZ256rmbkz, TB_BCAST_SD}, + {X86::VCVTTPD2QQSZrrkz, X86::VCVTTPD2QQSZrmbkz, TB_BCAST_SD}, {X86::VCVTTPD2QQZ128rrkz, X86::VCVTTPD2QQZ128rmbkz, TB_BCAST_SD}, {X86::VCVTTPD2QQZ256rrkz, X86::VCVTTPD2QQZ256rmbkz, TB_BCAST_SD}, {X86::VCVTTPD2QQZrrkz, X86::VCVTTPD2QQZrmbkz, TB_BCAST_SD}, + {X86::VCVTTPD2UDQSZ128rrkz, X86::VCVTTPD2UDQSZ128rmbkz, TB_BCAST_SD}, + {X86::VCVTTPD2UDQSZ256rrkz, X86::VCVTTPD2UDQSZ256rmbkz, TB_BCAST_SD}, + {X86::VCVTTPD2UDQSZrrkz, X86::VCVTTPD2UDQSZrmbkz, TB_BCAST_SD}, {X86::VCVTTPD2UDQZ128rrkz, X86::VCVTTPD2UDQZ128rmbkz, TB_BCAST_SD}, {X86::VCVTTPD2UDQZ256rrkz, X86::VCVTTPD2UDQZ256rmbkz, TB_BCAST_SD}, {X86::VCVTTPD2UDQZrrkz, X86::VCVTTPD2UDQZrmbkz, TB_BCAST_SD}, + {X86::VCVTTPD2UQQSZ128rrkz, X86::VCVTTPD2UQQSZ128rmbkz, TB_BCAST_SD}, + {X86::VCVTTPD2UQQSZ256rrkz, X86::VCVTTPD2UQQSZ256rmbkz, TB_BCAST_SD}, + {X86::VCVTTPD2UQQSZrrkz, X86::VCVTTPD2UQQSZrmbkz, TB_BCAST_SD}, {X86::VCVTTPD2UQQZ128rrkz, X86::VCVTTPD2UQQZ128rmbkz, TB_BCAST_SD}, {X86::VCVTTPD2UQQZ256rrkz, X86::VCVTTPD2UQQZ256rmbkz, TB_BCAST_SD}, {X86::VCVTTPD2UQQZrrkz, X86::VCVTTPD2UQQZrmbkz, TB_BCAST_SD}, @@ -7913,6 +8037,9 @@ static const X86FoldTableEntry BroadcastTable2[] = { {X86::VCVTTPH2WZ128rrkz, X86::VCVTTPH2WZ128rmbkz, TB_BCAST_SH}, {X86::VCVTTPH2WZ256rrkz, X86::VCVTTPH2WZ256rmbkz, TB_BCAST_SH}, {X86::VCVTTPH2WZrrkz, X86::VCVTTPH2WZrmbkz, TB_BCAST_SH}, + {X86::VCVTTPS2DQSZ128rrkz, X86::VCVTTPS2DQSZ128rmbkz, TB_BCAST_SS}, + {X86::VCVTTPS2DQSZ256rrkz, X86::VCVTTPS2DQSZ256rmbkz, TB_BCAST_SS}, + {X86::VCVTTPS2DQSZrrkz, X86::VCVTTPS2DQSZrmbkz, TB_BCAST_SS}, {X86::VCVTTPS2DQZ128rrkz, X86::VCVTTPS2DQZ128rmbkz, TB_BCAST_SS}, {X86::VCVTTPS2DQZ256rrkz, X86::VCVTTPS2DQZ256rmbkz, TB_BCAST_SS}, {X86::VCVTTPS2DQZrrkz, X86::VCVTTPS2DQZrmbkz, TB_BCAST_SS}, @@ -7922,12 +8049,21 @@ static const X86FoldTableEntry BroadcastTable2[] = { {X86::VCVTTPS2IUBSZ128rrkz, X86::VCVTTPS2IUBSZ128rmbkz, TB_BCAST_SS}, {X86::VCVTTPS2IUBSZ256rrkz, X86::VCVTTPS2IUBSZ256rmbkz, TB_BCAST_SS}, {X86::VCVTTPS2IUBSZrrkz, X86::VCVTTPS2IUBSZrmbkz, TB_BCAST_SS}, + {X86::VCVTTPS2QQSZ128rrkz, X86::VCVTTPS2QQSZ128rmbkz, TB_BCAST_SS}, + {X86::VCVTTPS2QQSZ256rrkz, X86::VCVTTPS2QQSZ256rmbkz, TB_BCAST_SS}, + {X86::VCVTTPS2QQSZrrkz, X86::VCVTTPS2QQSZrmbkz, TB_BCAST_SS}, {X86::VCVTTPS2QQZ128rrkz, X86::VCVTTPS2QQZ128rmbkz, TB_BCAST_SS}, {X86::VCVTTPS2QQZ256rrkz, X86::VCVTTPS2QQZ256rmbkz, TB_BCAST_SS}, {X86::VCVTTPS2QQZrrkz, X86::VCVTTPS2QQZrmbkz, TB_BCAST_SS}, + {X86::VCVTTPS2UDQSZ128rrkz, X86::VCVTTPS2UDQSZ128rmbkz, TB_BCAST_SS}, + {X86::VCVTTPS2UDQSZ256rrkz, X86::VCVTTPS2UDQSZ256rmbkz, TB_BCAST_SS}, + {X86::VCVTTPS2UDQSZrrkz, X86::VCVTTPS2UDQSZrmbkz, TB_BCAST_SS}, {X86::VCVTTPS2UDQZ128rrkz, X86::VCVTTPS2UDQZ128rmbkz, TB_BCAST_SS}, {X86::VCVTTPS2UDQZ256rrkz, X86::VCVTTPS2UDQZ256rmbkz, TB_BCAST_SS}, {X86::VCVTTPS2UDQZrrkz, X86::VCVTTPS2UDQZrmbkz, TB_BCAST_SS}, + {X86::VCVTTPS2UQQSZ128rrkz, X86::VCVTTPS2UQQSZ128rmbkz, TB_BCAST_SS}, + {X86::VCVTTPS2UQQSZ256rrkz, X86::VCVTTPS2UQQSZ256rmbkz, TB_BCAST_SS}, + {X86::VCVTTPS2UQQSZrrkz, X86::VCVTTPS2UQQSZrmbkz, TB_BCAST_SS}, {X86::VCVTTPS2UQQZ128rrkz, X86::VCVTTPS2UQQZ128rmbkz, TB_BCAST_SS}, {X86::VCVTTPS2UQQZ256rrkz, X86::VCVTTPS2UQQZ256rmbkz, TB_BCAST_SS}, {X86::VCVTTPS2UQQZrrkz, X86::VCVTTPS2UQQZrmbkz, TB_BCAST_SS}, @@ -8677,15 +8813,27 @@ static const X86FoldTableEntry BroadcastTable3[] = { {X86::VCVTTNEBF162IUBSZ128rrk, X86::VCVTTNEBF162IUBSZ128rmbk, TB_BCAST_SH}, 
{X86::VCVTTNEBF162IUBSZ256rrk, X86::VCVTTNEBF162IUBSZ256rmbk, TB_BCAST_SH}, {X86::VCVTTNEBF162IUBSZrrk, X86::VCVTTNEBF162IUBSZrmbk, TB_BCAST_SH}, + {X86::VCVTTPD2DQSZ128rrk, X86::VCVTTPD2DQSZ128rmbk, TB_BCAST_SD}, + {X86::VCVTTPD2DQSZ256rrk, X86::VCVTTPD2DQSZ256rmbk, TB_BCAST_SD}, + {X86::VCVTTPD2DQSZrrk, X86::VCVTTPD2DQSZrmbk, TB_BCAST_SD}, {X86::VCVTTPD2DQZ128rrk, X86::VCVTTPD2DQZ128rmbk, TB_BCAST_SD}, {X86::VCVTTPD2DQZ256rrk, X86::VCVTTPD2DQZ256rmbk, TB_BCAST_SD}, {X86::VCVTTPD2DQZrrk, X86::VCVTTPD2DQZrmbk, TB_BCAST_SD}, + {X86::VCVTTPD2QQSZ128rrk, X86::VCVTTPD2QQSZ128rmbk, TB_BCAST_SD}, + {X86::VCVTTPD2QQSZ256rrk, X86::VCVTTPD2QQSZ256rmbk, TB_BCAST_SD}, + {X86::VCVTTPD2QQSZrrk, X86::VCVTTPD2QQSZrmbk, TB_BCAST_SD}, {X86::VCVTTPD2QQZ128rrk, X86::VCVTTPD2QQZ128rmbk, TB_BCAST_SD}, {X86::VCVTTPD2QQZ256rrk, X86::VCVTTPD2QQZ256rmbk, TB_BCAST_SD}, {X86::VCVTTPD2QQZrrk, X86::VCVTTPD2QQZrmbk, TB_BCAST_SD}, + {X86::VCVTTPD2UDQSZ128rrk, X86::VCVTTPD2UDQSZ128rmbk, TB_BCAST_SD}, + {X86::VCVTTPD2UDQSZ256rrk, X86::VCVTTPD2UDQSZ256rmbk, TB_BCAST_SD}, + {X86::VCVTTPD2UDQSZrrk, X86::VCVTTPD2UDQSZrmbk, TB_BCAST_SD}, {X86::VCVTTPD2UDQZ128rrk, X86::VCVTTPD2UDQZ128rmbk, TB_BCAST_SD}, {X86::VCVTTPD2UDQZ256rrk, X86::VCVTTPD2UDQZ256rmbk, TB_BCAST_SD}, {X86::VCVTTPD2UDQZrrk, X86::VCVTTPD2UDQZrmbk, TB_BCAST_SD}, + {X86::VCVTTPD2UQQSZ128rrk, X86::VCVTTPD2UQQSZ128rmbk, TB_BCAST_SD}, + {X86::VCVTTPD2UQQSZ256rrk, X86::VCVTTPD2UQQSZ256rmbk, TB_BCAST_SD}, + {X86::VCVTTPD2UQQSZrrk, X86::VCVTTPD2UQQSZrmbk, TB_BCAST_SD}, {X86::VCVTTPD2UQQZ128rrk, X86::VCVTTPD2UQQZ128rmbk, TB_BCAST_SD}, {X86::VCVTTPD2UQQZ256rrk, X86::VCVTTPD2UQQZ256rmbk, TB_BCAST_SD}, {X86::VCVTTPD2UQQZrrk, X86::VCVTTPD2UQQZrmbk, TB_BCAST_SD}, @@ -8713,6 +8861,9 @@ static const X86FoldTableEntry BroadcastTable3[] = { {X86::VCVTTPH2WZ128rrk, X86::VCVTTPH2WZ128rmbk, TB_BCAST_SH}, {X86::VCVTTPH2WZ256rrk, X86::VCVTTPH2WZ256rmbk, TB_BCAST_SH}, {X86::VCVTTPH2WZrrk, X86::VCVTTPH2WZrmbk, TB_BCAST_SH}, + {X86::VCVTTPS2DQSZ128rrk, X86::VCVTTPS2DQSZ128rmbk, TB_BCAST_SS}, + {X86::VCVTTPS2DQSZ256rrk, X86::VCVTTPS2DQSZ256rmbk, TB_BCAST_SS}, + {X86::VCVTTPS2DQSZrrk, X86::VCVTTPS2DQSZrmbk, TB_BCAST_SS}, {X86::VCVTTPS2DQZ128rrk, X86::VCVTTPS2DQZ128rmbk, TB_BCAST_SS}, {X86::VCVTTPS2DQZ256rrk, X86::VCVTTPS2DQZ256rmbk, TB_BCAST_SS}, {X86::VCVTTPS2DQZrrk, X86::VCVTTPS2DQZrmbk, TB_BCAST_SS}, @@ -8722,12 +8873,21 @@ static const X86FoldTableEntry BroadcastTable3[] = { {X86::VCVTTPS2IUBSZ128rrk, X86::VCVTTPS2IUBSZ128rmbk, TB_BCAST_SS}, {X86::VCVTTPS2IUBSZ256rrk, X86::VCVTTPS2IUBSZ256rmbk, TB_BCAST_SS}, {X86::VCVTTPS2IUBSZrrk, X86::VCVTTPS2IUBSZrmbk, TB_BCAST_SS}, + {X86::VCVTTPS2QQSZ128rrk, X86::VCVTTPS2QQSZ128rmbk, TB_BCAST_SS}, + {X86::VCVTTPS2QQSZ256rrk, X86::VCVTTPS2QQSZ256rmbk, TB_BCAST_SS}, + {X86::VCVTTPS2QQSZrrk, X86::VCVTTPS2QQSZrmbk, TB_BCAST_SS}, {X86::VCVTTPS2QQZ128rrk, X86::VCVTTPS2QQZ128rmbk, TB_BCAST_SS}, {X86::VCVTTPS2QQZ256rrk, X86::VCVTTPS2QQZ256rmbk, TB_BCAST_SS}, {X86::VCVTTPS2QQZrrk, X86::VCVTTPS2QQZrmbk, TB_BCAST_SS}, + {X86::VCVTTPS2UDQSZ128rrk, X86::VCVTTPS2UDQSZ128rmbk, TB_BCAST_SS}, + {X86::VCVTTPS2UDQSZ256rrk, X86::VCVTTPS2UDQSZ256rmbk, TB_BCAST_SS}, + {X86::VCVTTPS2UDQSZrrk, X86::VCVTTPS2UDQSZrmbk, TB_BCAST_SS}, {X86::VCVTTPS2UDQZ128rrk, X86::VCVTTPS2UDQZ128rmbk, TB_BCAST_SS}, {X86::VCVTTPS2UDQZ256rrk, X86::VCVTTPS2UDQZ256rmbk, TB_BCAST_SS}, {X86::VCVTTPS2UDQZrrk, X86::VCVTTPS2UDQZrmbk, TB_BCAST_SS}, + {X86::VCVTTPS2UQQSZ128rrk, X86::VCVTTPS2UQQSZ128rmbk, TB_BCAST_SS}, + {X86::VCVTTPS2UQQSZ256rrk, X86::VCVTTPS2UQQSZ256rmbk, TB_BCAST_SS}, + 
{X86::VCVTTPS2UQQSZrrk, X86::VCVTTPS2UQQSZrmbk, TB_BCAST_SS}, {X86::VCVTTPS2UQQZ128rrk, X86::VCVTTPS2UQQZ128rmbk, TB_BCAST_SS}, {X86::VCVTTPS2UQQZ256rrk, X86::VCVTTPS2UQQZ256rmbk, TB_BCAST_SS}, {X86::VCVTTPS2UQQZrrk, X86::VCVTTPS2UQQZrmbk, TB_BCAST_SS}, diff --git a/llvm/test/Transforms/VectorCombine/AArch64/shrink-types.ll b/llvm/test/Transforms/VectorCombine/AArch64/shrink-types.ll index 0166656..33e2958 100644 --- a/llvm/test/Transforms/VectorCombine/AArch64/shrink-types.ll +++ b/llvm/test/Transforms/VectorCombine/AArch64/shrink-types.ll @@ -73,4 +73,31 @@ entry: ret i32 %6 } +define i32 @phi_bug(<16 x i32> %a, ptr %b) { +; CHECK-LABEL: @phi_bug( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[B:%.*]], align 1 +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[A_PHI:%.*]] = phi <16 x i32> [ [[A:%.*]], [[ENTRY:%.*]] ] +; CHECK-NEXT: [[WIDE_LOAD_PHI:%.*]] = phi <16 x i8> [ [[WIDE_LOAD]], [[ENTRY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = trunc <16 x i32> [[A_PHI]] to <16 x i8> +; CHECK-NEXT: [[TMP1:%.*]] = and <16 x i8> [[WIDE_LOAD_PHI]], [[TMP0]] +; CHECK-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[TMP1]] to <16 x i32> +; CHECK-NEXT: [[TMP3:%.*]] = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP2]]) +; CHECK-NEXT: ret i32 [[TMP3]] +; +entry: + %wide.load = load <16 x i8>, ptr %b, align 1 + br label %vector.body + +vector.body: + %a.phi = phi <16 x i32> [ %a, %entry ] + %wide.load.phi = phi <16 x i8> [ %wide.load, %entry ] + %0 = zext <16 x i8> %wide.load.phi to <16 x i32> + %1 = and <16 x i32> %0, %a.phi + %2 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %1) + ret i32 %2 +} + declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>) diff --git a/llvm/test/tools/llvm-mca/X86/Znver4/zero-idioms.s b/llvm/test/tools/llvm-mca/X86/Znver4/zero-idioms.s index cc3c286..b6ebd93 100644 --- a/llvm/test/tools/llvm-mca/X86/Znver4/zero-idioms.s +++ b/llvm/test/tools/llvm-mca/X86/Znver4/zero-idioms.s @@ -161,13 +161,13 @@ vpxorq %zmm19, %zmm19, %zmm21 # CHECK: Iterations: 1 # CHECK-NEXT: Instructions: 139 -# CHECK-NEXT: Total Cycles: 42 +# CHECK-NEXT: Total Cycles: 40 # CHECK-NEXT: Total uOps: 139 # CHECK: Dispatch Width: 6 -# CHECK-NEXT: uOps Per Cycle: 3.31 -# CHECK-NEXT: IPC: 3.31 -# CHECK-NEXT: Block RThroughput: 25.8 +# CHECK-NEXT: uOps Per Cycle: 3.48 +# CHECK-NEXT: IPC: 3.48 +# CHECK-NEXT: Block RThroughput: 24.8 # CHECK: Instruction Info: # CHECK-NEXT: [1]: #uOps @@ -301,7 +301,7 @@ vpxorq %zmm19, %zmm19, %zmm21 # CHECK-NEXT: 1 1 0.25 vpxorq %xmm19, %xmm19, %xmm19 # CHECK-NEXT: 1 1 0.25 vpxord %ymm19, %ymm19, %ymm19 # CHECK-NEXT: 1 1 0.25 vpxorq %ymm19, %ymm19, %ymm19 -# CHECK-NEXT: 1 1 0.50 vpxord %zmm19, %zmm19, %zmm19 +# CHECK-NEXT: 1 0 0.17 vpxord %zmm19, %zmm19, %zmm19 # CHECK-NEXT: 1 1 0.50 vpxorq %zmm19, %zmm19, %zmm19 # CHECK-NEXT: 1 0 0.17 vxorps %xmm4, %xmm4, %xmm5 # CHECK-NEXT: 1 0 0.17 vxorpd %xmm1, %xmm1, %xmm3 @@ -315,17 +315,17 @@ vpxorq %zmm19, %zmm19, %zmm21 # CHECK-NEXT: 1 1 0.25 vpxorq %xmm19, %xmm19, %xmm21 # CHECK-NEXT: 1 1 0.25 vpxord %ymm19, %ymm19, %ymm21 # CHECK-NEXT: 1 1 0.25 vpxorq %ymm19, %ymm19, %ymm21 -# CHECK-NEXT: 1 1 0.50 vpxord %zmm19, %zmm19, %zmm21 +# CHECK-NEXT: 1 0 0.17 vpxord %zmm19, %zmm19, %zmm21 # CHECK-NEXT: 1 1 0.50 vpxorq %zmm19, %zmm19, %zmm21 # CHECK: Register File statistics: -# CHECK-NEXT: Total number of mappings created: 65 -# CHECK-NEXT: Max number of mappings used: 45 +# CHECK-NEXT: Total number of mappings created: 63 +# CHECK-NEXT: Max number of mappings used: 43 # 
CHECK: * Register File #1 -- Zn4FpPRF: # CHECK-NEXT: Number of physical registers: 192 -# CHECK-NEXT: Total number of mappings created: 65 -# CHECK-NEXT: Max number of mappings used: 45 +# CHECK-NEXT: Total number of mappings created: 63 +# CHECK-NEXT: Max number of mappings used: 43 # CHECK: * Register File #2 -- Zn4IntegerPRF: # CHECK-NEXT: Number of physical registers: 224 @@ -359,7 +359,7 @@ vpxorq %zmm19, %zmm19, %zmm21 # CHECK: Resource pressure per iteration: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] -# CHECK-NEXT: - - - - - - - - 25.00 25.00 27.00 26.00 - - - - - - - - - - - +# CHECK-NEXT: - - - - - - - - 24.00 25.00 25.00 25.00 - - - - - - - - - - - # CHECK: Resource pressure by instruction: # CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12.0] [12.1] [13] [14.0] [14.1] [14.2] [15.0] [15.1] [15.2] [16.0] [16.1] Instructions: @@ -453,9 +453,9 @@ vpxorq %zmm19, %zmm19, %zmm21 # CHECK-NEXT: - - - - - - - - 1.00 - - - - - - - - - - - - - - vpandnd %xmm19, %xmm19, %xmm19 # CHECK-NEXT: - - - - - - - - - - - 1.00 - - - - - - - - - - - vpandnq %xmm19, %xmm19, %xmm19 # CHECK-NEXT: - - - - - - - - - - - 1.00 - - - - - - - - - - - vpandnd %ymm19, %ymm19, %ymm19 -# CHECK-NEXT: - - - - - - - - - 1.00 - - - - - - - - - - - - - vpandnq %ymm19, %ymm19, %ymm19 -# CHECK-NEXT: - - - - - - - - 2.00 - - - - - - - - - - - - - - vpandnd %zmm19, %zmm19, %zmm19 -# CHECK-NEXT: - - - - - - - - - - - 2.00 - - - - - - - - - - - vpandnq %zmm19, %zmm19, %zmm19 +# CHECK-NEXT: - - - - - - - - - - - 1.00 - - - - - - - - - - - vpandnq %ymm19, %ymm19, %ymm19 +# CHECK-NEXT: - - - - - - - - - 2.00 - - - - - - - - - - - - - vpandnd %zmm19, %zmm19, %zmm19 +# CHECK-NEXT: - - - - - - - - 2.00 - - - - - - - - - - - - - - vpandnq %zmm19, %zmm19, %zmm19 # CHECK-NEXT: - - - - - - - - - - - - - - - - - - - - - - - vandnps %xmm2, %xmm2, %xmm5 # CHECK-NEXT: - - - - - - - - - - - - - - - - - - - - - - - vandnpd %xmm1, %xmm1, %xmm5 # CHECK-NEXT: - - - - - - - - - - - - - - - - - - - - - - - vpandn %xmm3, %xmm3, %xmm5 @@ -478,174 +478,174 @@ vpxorq %zmm19, %zmm19, %zmm21 # CHECK-NEXT: - - - - - - - - - - - - - - - - - - - - - - - vxorpd %ymm1, %ymm1, %ymm1 # CHECK-NEXT: - - - - - - - - - - - 2.00 - - - - - - - - - - - vxorps %zmm2, %zmm2, %zmm2 # CHECK-NEXT: - - - - - - - - - - 2.00 - - - - - - - - - - - - vxorpd %zmm1, %zmm1, %zmm1 -# CHECK-NEXT: - - - - - - - - - - 1.00 - - - - - - - - - - - - pxor %mm2, %mm2 -# CHECK-NEXT: - - - - - - - - - 1.00 - - - - - - - - - - - - - pxor %xmm2, %xmm2 +# CHECK-NEXT: - - - - - - - - - 1.00 - - - - - - - - - - - - - pxor %mm2, %mm2 +# CHECK-NEXT: - - - - - - - - 1.00 - - - - - - - - - - - - - - pxor %xmm2, %xmm2 # CHECK-NEXT: - - - - - - - - - - - - - - - - - - - - - - - vpxor %xmm3, %xmm3, %xmm3 # CHECK-NEXT: - - - - - - - - - - - - - - - - - - - - - - - vpxor %ymm3, %ymm3, %ymm3 # CHECK-NEXT: - - - - - - - - 1.00 - - - - - - - - - - - - - - vpxord %xmm19, %xmm19, %xmm19 # CHECK-NEXT: - - - - - - - - - - - 1.00 - - - - - - - - - - - vpxorq %xmm19, %xmm19, %xmm19 # CHECK-NEXT: - - - - - - - - - - 1.00 - - - - - - - - - - - - vpxord %ymm19, %ymm19, %ymm19 # CHECK-NEXT: - - - - - - - - - 1.00 - - - - - - - - - - - - - vpxorq %ymm19, %ymm19, %ymm19 -# CHECK-NEXT: - - - - - - - - 2.00 - - - - - - - - - - - - - - vpxord %zmm19, %zmm19, %zmm19 -# CHECK-NEXT: - - - - - - - - - - - 2.00 - - - - - - - - - - - vpxorq %zmm19, %zmm19, %zmm19 +# CHECK-NEXT: - - - - - - - - - - - - - - - - - - - 
- - - - vpxord %zmm19, %zmm19, %zmm19 +# CHECK-NEXT: - - - - - - - - - - 2.00 - - - - - - - - - - - - vpxorq %zmm19, %zmm19, %zmm19 # CHECK-NEXT: - - - - - - - - - - - - - - - - - - - - - - - vxorps %xmm4, %xmm4, %xmm5 # CHECK-NEXT: - - - - - - - - - - - - - - - - - - - - - - - vxorpd %xmm1, %xmm1, %xmm3 # CHECK-NEXT: - - - - - - - - - - - - - - - - - - - - - - - vxorps %ymm4, %ymm4, %ymm5 # CHECK-NEXT: - - - - - - - - - - - - - - - - - - - - - - - vxorpd %ymm1, %ymm1, %ymm3 -# CHECK-NEXT: - - - - - - - - 2.00 - - - - - - - - - - - - - - vxorps %zmm4, %zmm4, %zmm5 -# CHECK-NEXT: - - - - - - - - - - 2.00 - - - - - - - - - - - - vxorpd %zmm1, %zmm1, %zmm3 +# CHECK-NEXT: - - - - - - - - - 2.00 - - - - - - - - - - - - - vxorps %zmm4, %zmm4, %zmm5 +# CHECK-NEXT: - - - - - - - - 2.00 - - - - - - - - - - - - - - vxorpd %zmm1, %zmm1, %zmm3 # CHECK-NEXT: - - - - - - - - - - - - - - - - - - - - - - - vpxor %xmm3, %xmm3, %xmm5 # CHECK-NEXT: - - - - - - - - - - - - - - - - - - - - - - - vpxor %ymm3, %ymm3, %ymm5 # CHECK-NEXT: - - - - - - - - - - 1.00 - - - - - - - - - - - - vpxord %xmm19, %xmm19, %xmm21 -# CHECK-NEXT: - - - - - - - - - 1.00 - - - - - - - - - - - - - vpxorq %xmm19, %xmm19, %xmm21 -# CHECK-NEXT: - - - - - - - - 1.00 - - - - - - - - - - - - - - vpxord %ymm19, %ymm19, %ymm21 -# CHECK-NEXT: - - - - - - - - - - - 1.00 - - - - - - - - - - - vpxorq %ymm19, %ymm19, %ymm21 -# CHECK-NEXT: - - - - - - - - - - 2.00 - - - - - - - - - - - - vpxord %zmm19, %zmm19, %zmm21 -# CHECK-NEXT: - - - - - - - - - 2.00 - - - - - - - - - - - - - vpxorq %zmm19, %zmm19, %zmm21 +# CHECK-NEXT: - - - - - - - - 1.00 - - - - - - - - - - - - - - vpxorq %xmm19, %xmm19, %xmm21 +# CHECK-NEXT: - - - - - - - - - - - 1.00 - - - - - - - - - - - vpxord %ymm19, %ymm19, %ymm21 +# CHECK-NEXT: - - - - - - - - - - 1.00 - - - - - - - - - - - - vpxorq %ymm19, %ymm19, %ymm21 +# CHECK-NEXT: - - - - - - - - - - - - - - - - - - - - - - - vpxord %zmm19, %zmm19, %zmm21 +# CHECK-NEXT: - - - - - - - - - - - 2.00 - - - - - - - - - - - vpxorq %zmm19, %zmm19, %zmm21 # CHECK: Timeline view: # CHECK-NEXT: 0123456789 0123456789 -# CHECK-NEXT: Index 0123456789 0123456789 01 - -# CHECK: [0,0] DR . . . . . . . .. subl %eax, %eax -# CHECK-NEXT: [0,1] DR . . . . . . . .. subq %rax, %rax -# CHECK-NEXT: [0,2] DR . . . . . . . .. xorl %eax, %eax -# CHECK-NEXT: [0,3] DR . . . . . . . .. xorq %rax, %rax -# CHECK-NEXT: [0,4] DeER . . . . . . . .. pcmpgtb %mm2, %mm2 -# CHECK-NEXT: [0,5] D=eER. . . . . . . .. pcmpgtd %mm2, %mm2 -# CHECK-NEXT: [0,6] .D=eER . . . . . . .. pcmpgtw %mm2, %mm2 -# CHECK-NEXT: [0,7] .DeE-R . . . . . . .. pcmpgtb %xmm2, %xmm2 -# CHECK-NEXT: [0,8] .DeE-R . . . . . . .. pcmpgtd %xmm2, %xmm2 -# CHECK-NEXT: [0,9] .DeE-R . . . . . . .. pcmpgtq %xmm2, %xmm2 -# CHECK-NEXT: [0,10] .D=eER . . . . . . .. pcmpgtw %xmm2, %xmm2 -# CHECK-NEXT: [0,11] .D---R . . . . . . .. vpcmpgtb %xmm3, %xmm3, %xmm3 -# CHECK-NEXT: [0,12] . D--R . . . . . . .. vpcmpgtd %xmm3, %xmm3, %xmm3 -# CHECK-NEXT: [0,13] . D--R . . . . . . .. vpcmpgtq %xmm3, %xmm3, %xmm3 -# CHECK-NEXT: [0,14] . D--R . . . . . . .. vpcmpgtw %xmm3, %xmm3, %xmm3 -# CHECK-NEXT: [0,15] . D---R . . . . . . .. vpcmpgtb %xmm3, %xmm3, %xmm5 -# CHECK-NEXT: [0,16] . D---R . . . . . . .. vpcmpgtd %xmm3, %xmm3, %xmm5 -# CHECK-NEXT: [0,17] . D---R . . . . . . .. vpcmpgtq %xmm3, %xmm3, %xmm5 -# CHECK-NEXT: [0,18] . D--R . . . . . . .. vpcmpgtw %xmm3, %xmm3, %xmm5 -# CHECK-NEXT: [0,19] . D--R . . . . . . .. vpcmpgtb %ymm3, %ymm3, %ymm3 -# CHECK-NEXT: [0,20] . D--R . . . . . . .. 
vpcmpgtd %ymm3, %ymm3, %ymm3 -# CHECK-NEXT: [0,21] . D--R . . . . . . .. vpcmpgtq %ymm3, %ymm3, %ymm3 -# CHECK-NEXT: [0,22] . D--R . . . . . . .. vpcmpgtw %ymm3, %ymm3, %ymm3 -# CHECK-NEXT: [0,23] . D--R . . . . . . .. vpcmpgtb %ymm3, %ymm3, %ymm5 -# CHECK-NEXT: [0,24] . D--R . . . . . . .. vpcmpgtd %ymm3, %ymm3, %ymm5 -# CHECK-NEXT: [0,25] . D--R . . . . . . .. vpcmpgtq %ymm3, %ymm3, %ymm5 -# CHECK-NEXT: [0,26] . D--R . . . . . . .. vpcmpgtw %ymm3, %ymm3, %ymm5 -# CHECK-NEXT: [0,27] . DeER . . . . . . .. psubb %mm2, %mm2 -# CHECK-NEXT: [0,28] . D=eER . . . . . . .. psubd %mm2, %mm2 -# CHECK-NEXT: [0,29] . D==eER. . . . . . .. psubq %mm2, %mm2 -# CHECK-NEXT: [0,30] . D==eER . . . . . .. psubw %mm2, %mm2 -# CHECK-NEXT: [0,31] . DeE--R . . . . . .. psubb %xmm2, %xmm2 -# CHECK-NEXT: [0,32] . DeE--R . . . . . .. psubd %xmm2, %xmm2 -# CHECK-NEXT: [0,33] . DeE--R . . . . . .. psubq %xmm2, %xmm2 -# CHECK-NEXT: [0,34] . D=eE-R . . . . . .. psubw %xmm2, %xmm2 -# CHECK-NEXT: [0,35] . D----R . . . . . .. vpsubb %xmm3, %xmm3, %xmm3 -# CHECK-NEXT: [0,36] . .D---R . . . . . .. vpsubd %xmm3, %xmm3, %xmm3 -# CHECK-NEXT: [0,37] . .D---R . . . . . .. vpsubq %xmm3, %xmm3, %xmm3 -# CHECK-NEXT: [0,38] . .D---R . . . . . .. vpsubw %xmm3, %xmm3, %xmm3 -# CHECK-NEXT: [0,39] . .D----R . . . . . .. vpsubb %ymm3, %ymm3, %ymm3 -# CHECK-NEXT: [0,40] . .D----R . . . . . .. vpsubd %ymm3, %ymm3, %ymm3 -# CHECK-NEXT: [0,41] . .D----R . . . . . .. vpsubq %ymm3, %ymm3, %ymm3 -# CHECK-NEXT: [0,42] . . D---R . . . . . .. vpsubw %ymm3, %ymm3, %ymm3 -# CHECK-NEXT: [0,43] . . D---R . . . . . .. vpsubb %xmm3, %xmm3, %xmm5 -# CHECK-NEXT: [0,44] . . D---R . . . . . .. vpsubd %xmm3, %xmm3, %xmm5 -# CHECK-NEXT: [0,45] . . D---R . . . . . .. vpsubq %xmm3, %xmm3, %xmm5 -# CHECK-NEXT: [0,46] . . D---R . . . . . .. vpsubw %xmm3, %xmm3, %xmm5 -# CHECK-NEXT: [0,47] . . D---R . . . . . .. vpsubb %ymm3, %ymm3, %ymm5 -# CHECK-NEXT: [0,48] . . D---R . . . . . .. vpsubd %ymm3, %ymm3, %ymm5 -# CHECK-NEXT: [0,49] . . D---R . . . . . .. vpsubq %ymm3, %ymm3, %ymm5 -# CHECK-NEXT: [0,50] . . D---R . . . . . .. vpsubw %ymm3, %ymm3, %ymm5 -# CHECK-NEXT: [0,51] . . DeE-R . . . . . .. vpsubb %xmm19, %xmm19, %xmm19 -# CHECK-NEXT: [0,52] . . D=eER . . . . . .. vpsubd %xmm19, %xmm19, %xmm19 -# CHECK-NEXT: [0,53] . . D==eER . . . . . .. vpsubq %xmm19, %xmm19, %xmm19 -# CHECK-NEXT: [0,54] . . D==eER. . . . . .. vpsubw %xmm19, %xmm19, %xmm19 -# CHECK-NEXT: [0,55] . . D===eER . . . . .. vpsubb %ymm19, %ymm19, %ymm19 -# CHECK-NEXT: [0,56] . . D====eER . . . . .. vpsubd %ymm19, %ymm19, %ymm19 -# CHECK-NEXT: [0,57] . . D=====eER . . . . .. vpsubq %ymm19, %ymm19, %ymm19 -# CHECK-NEXT: [0,58] . . D======eER . . . . .. vpsubw %ymm19, %ymm19, %ymm19 -# CHECK-NEXT: [0,59] . . D=======eER. . . . .. vpsubb %zmm19, %zmm19, %zmm19 -# CHECK-NEXT: [0,60] . . D=======eER . . . .. vpsubd %zmm19, %zmm19, %zmm19 -# CHECK-NEXT: [0,61] . . D========eER . . . .. vpsubq %zmm19, %zmm19, %zmm19 -# CHECK-NEXT: [0,62] . . D=========eER . . . .. vpsubw %zmm19, %zmm19, %zmm19 -# CHECK-NEXT: [0,63] . . D==========eER . . . .. vpsubb %xmm19, %xmm19, %xmm21 -# CHECK-NEXT: [0,64] . . D===========eER. . . .. vpsubd %xmm19, %xmm19, %xmm21 -# CHECK-NEXT: [0,65] . . D===========eER. . . .. vpsubq %xmm19, %xmm19, %xmm21 -# CHECK-NEXT: [0,66] . . .D==========eER. . . .. vpsubw %xmm19, %xmm19, %xmm21 -# CHECK-NEXT: [0,67] . . .D==========eER. . . .. vpsubb %ymm19, %ymm19, %ymm21 -# CHECK-NEXT: [0,68] . . .D===========eER . . .. vpsubd %ymm19, %ymm19, %ymm21 -# CHECK-NEXT: [0,69] . . 
.D===========eER . . .. vpsubq %ymm19, %ymm19, %ymm21 -# CHECK-NEXT: [0,70] . . .D===========eER . . .. vpsubw %ymm19, %ymm19, %ymm21 -# CHECK-NEXT: [0,71] . . .D===========eER . . .. vpsubb %zmm19, %zmm19, %zmm21 -# CHECK-NEXT: [0,72] . . . D===========eER . . .. vpsubd %zmm19, %zmm19, %zmm21 -# CHECK-NEXT: [0,73] . . . D===========eER . . .. vpsubq %zmm19, %zmm19, %zmm21 -# CHECK-NEXT: [0,74] . . . D===========eER . . .. vpsubw %zmm19, %zmm19, %zmm21 -# CHECK-NEXT: [0,75] . . . DeE-----------R . . .. andnps %xmm0, %xmm0 -# CHECK-NEXT: [0,76] . . . DeE-----------R . . .. andnpd %xmm1, %xmm1 -# CHECK-NEXT: [0,77] . . . D-------------R . . .. vandnps %xmm2, %xmm2, %xmm2 -# CHECK-NEXT: [0,78] . . . D------------R . . .. vandnpd %xmm1, %xmm1, %xmm1 -# CHECK-NEXT: [0,79] . . . D------------R . . .. vandnps %ymm2, %ymm2, %ymm2 -# CHECK-NEXT: [0,80] . . . D------------R . . .. vandnpd %ymm1, %ymm1, %ymm1 -# CHECK-NEXT: [0,81] . . . DeE-----------R . . .. vandnps %zmm2, %zmm2, %zmm2 -# CHECK-NEXT: [0,82] . . . DeE-----------R . . .. vandnpd %zmm1, %zmm1, %zmm1 -# CHECK-NEXT: [0,83] . . . DeE-----------R . . .. pandn %mm2, %mm2 -# CHECK-NEXT: [0,84] . . . DeE----------R . . .. pandn %xmm2, %xmm2 -# CHECK-NEXT: [0,85] . . . D------------R . . .. vpandn %xmm3, %xmm3, %xmm3 -# CHECK-NEXT: [0,86] . . . D------------R . . .. vpandn %ymm3, %ymm3, %ymm3 -# CHECK-NEXT: [0,87] . . . D==========eER . . .. vpandnd %xmm19, %xmm19, %xmm19 -# CHECK-NEXT: [0,88] . . . D===========eER . . .. vpandnq %xmm19, %xmm19, %xmm19 -# CHECK-NEXT: [0,89] . . . D============eER. . .. vpandnd %ymm19, %ymm19, %ymm19 -# CHECK-NEXT: [0,90] . . . D============eER . .. vpandnq %ymm19, %ymm19, %ymm19 -# CHECK-NEXT: [0,91] . . . D=============eER . .. vpandnd %zmm19, %zmm19, %zmm19 -# CHECK-NEXT: [0,92] . . . D==============eER . .. vpandnq %zmm19, %zmm19, %zmm19 -# CHECK-NEXT: [0,93] . . . D----------------R . .. vandnps %xmm2, %xmm2, %xmm5 -# CHECK-NEXT: [0,94] . . . D----------------R . .. vandnpd %xmm1, %xmm1, %xmm5 -# CHECK-NEXT: [0,95] . . . D----------------R . .. vpandn %xmm3, %xmm3, %xmm5 -# CHECK-NEXT: [0,96] . . . .D---------------R . .. vandnps %ymm2, %ymm2, %ymm5 -# CHECK-NEXT: [0,97] . . . .D---------------R . .. vandnpd %ymm1, %ymm1, %ymm5 -# CHECK-NEXT: [0,98] . . . .D---------------R . .. vpandn %ymm3, %ymm3, %ymm5 -# CHECK-NEXT: [0,99] . . . .DeE-------------R . .. vandnps %zmm2, %zmm2, %zmm5 -# CHECK-NEXT: [0,100] . . . .DeE-------------R . .. vandnpd %zmm1, %zmm1, %zmm5 -# CHECK-NEXT: [0,101] . . . .D==============eER . .. vpandnd %xmm19, %xmm19, %xmm21 -# CHECK-NEXT: [0,102] . . . . D=============eER . .. vpandnq %xmm19, %xmm19, %xmm21 -# CHECK-NEXT: [0,103] . . . . D=============eER . .. vpandnd %ymm19, %ymm19, %ymm21 -# CHECK-NEXT: [0,104] . . . . D==============eER. .. vpandnq %ymm19, %ymm19, %ymm21 -# CHECK-NEXT: [0,105] . . . . D==============eER. .. vpandnd %zmm19, %zmm19, %zmm21 -# CHECK-NEXT: [0,106] . . . . D==============eER. .. vpandnq %zmm19, %zmm19, %zmm21 -# CHECK-NEXT: [0,107] . . . . D=eE-------------R. .. xorps %xmm0, %xmm0 -# CHECK-NEXT: [0,108] . . . . DeE-------------R. .. xorpd %xmm1, %xmm1 -# CHECK-NEXT: [0,109] . . . . D---------------R. .. vxorps %xmm2, %xmm2, %xmm2 -# CHECK-NEXT: [0,110] . . . . D---------------R. .. vxorpd %xmm1, %xmm1, %xmm1 -# CHECK-NEXT: [0,111] . . . . D---------------R. .. vxorps %ymm2, %ymm2, %ymm2 -# CHECK-NEXT: [0,112] . . . . D---------------R. .. vxorpd %ymm1, %ymm1, %ymm1 -# CHECK-NEXT: [0,113] . . . . D=eE-------------R .. 
vxorps %zmm2, %zmm2, %zmm2 -# CHECK-NEXT: [0,114] . . . . DeE-------------R .. vxorpd %zmm1, %zmm1, %zmm1 -# CHECK-NEXT: [0,115] . . . . D======eE-------R .. pxor %mm2, %mm2 -# CHECK-NEXT: [0,116] . . . . D======eE-------R .. pxor %xmm2, %xmm2 -# CHECK-NEXT: [0,117] . . . . D---------------R .. vpxor %xmm3, %xmm3, %xmm3 -# CHECK-NEXT: [0,118] . . . . D---------------R .. vpxor %ymm3, %ymm3, %ymm3 -# CHECK-NEXT: [0,119] . . . . D============eE-R .. vpxord %xmm19, %xmm19, %xmm19 -# CHECK-NEXT: [0,120] . . . . D============eER .. vpxorq %xmm19, %xmm19, %xmm19 -# CHECK-NEXT: [0,121] . . . . D=============eER .. vpxord %ymm19, %ymm19, %ymm19 -# CHECK-NEXT: [0,122] . . . . D==============eER .. vpxorq %ymm19, %ymm19, %ymm19 -# CHECK-NEXT: [0,123] . . . . D===============eER .. vpxord %zmm19, %zmm19, %zmm19 -# CHECK-NEXT: [0,124] . . . . D================eER.. vpxorq %zmm19, %zmm19, %zmm19 -# CHECK-NEXT: [0,125] . . . . D------------------R.. vxorps %xmm4, %xmm4, %xmm5 -# CHECK-NEXT: [0,126] . . . . .D-----------------R.. vxorpd %xmm1, %xmm1, %xmm3 -# CHECK-NEXT: [0,127] . . . . .D-----------------R.. vxorps %ymm4, %ymm4, %ymm5 -# CHECK-NEXT: [0,128] . . . . .D-----------------R.. vxorpd %ymm1, %ymm1, %ymm3 -# CHECK-NEXT: [0,129] . . . . .D====eE-----------R.. vxorps %zmm4, %zmm4, %zmm5 -# CHECK-NEXT: [0,130] . . . . .D=====eE----------R.. vxorpd %zmm1, %zmm1, %zmm3 -# CHECK-NEXT: [0,131] . . . . .D-----------------R.. vpxor %xmm3, %xmm3, %xmm5 -# CHECK-NEXT: [0,132] . . . . . D----------------R.. vpxor %ymm3, %ymm3, %ymm5 -# CHECK-NEXT: [0,133] . . . . . D===============eER. vpxord %xmm19, %xmm19, %xmm21 -# CHECK-NEXT: [0,134] . . . . . D===============eER. vpxorq %xmm19, %xmm19, %xmm21 -# CHECK-NEXT: [0,135] . . . . . D===============eER. vpxord %ymm19, %ymm19, %ymm21 -# CHECK-NEXT: [0,136] . . . . . D================eER vpxorq %ymm19, %ymm19, %ymm21 -# CHECK-NEXT: [0,137] . . . . . D================eER vpxord %zmm19, %zmm19, %zmm21 -# CHECK-NEXT: [0,138] . . . . . D===============eER vpxorq %zmm19, %zmm19, %zmm21 +# CHECK-NEXT: Index 0123456789 0123456789 + +# CHECK: [0,0] DR . . . . . . . . subl %eax, %eax +# CHECK-NEXT: [0,1] DR . . . . . . . . subq %rax, %rax +# CHECK-NEXT: [0,2] DR . . . . . . . . xorl %eax, %eax +# CHECK-NEXT: [0,3] DR . . . . . . . . xorq %rax, %rax +# CHECK-NEXT: [0,4] DeER . . . . . . . . pcmpgtb %mm2, %mm2 +# CHECK-NEXT: [0,5] D=eER. . . . . . . . pcmpgtd %mm2, %mm2 +# CHECK-NEXT: [0,6] .D=eER . . . . . . . pcmpgtw %mm2, %mm2 +# CHECK-NEXT: [0,7] .DeE-R . . . . . . . pcmpgtb %xmm2, %xmm2 +# CHECK-NEXT: [0,8] .DeE-R . . . . . . . pcmpgtd %xmm2, %xmm2 +# CHECK-NEXT: [0,9] .DeE-R . . . . . . . pcmpgtq %xmm2, %xmm2 +# CHECK-NEXT: [0,10] .D=eER . . . . . . . pcmpgtw %xmm2, %xmm2 +# CHECK-NEXT: [0,11] .D---R . . . . . . . vpcmpgtb %xmm3, %xmm3, %xmm3 +# CHECK-NEXT: [0,12] . D--R . . . . . . . vpcmpgtd %xmm3, %xmm3, %xmm3 +# CHECK-NEXT: [0,13] . D--R . . . . . . . vpcmpgtq %xmm3, %xmm3, %xmm3 +# CHECK-NEXT: [0,14] . D--R . . . . . . . vpcmpgtw %xmm3, %xmm3, %xmm3 +# CHECK-NEXT: [0,15] . D---R . . . . . . . vpcmpgtb %xmm3, %xmm3, %xmm5 +# CHECK-NEXT: [0,16] . D---R . . . . . . . vpcmpgtd %xmm3, %xmm3, %xmm5 +# CHECK-NEXT: [0,17] . D---R . . . . . . . vpcmpgtq %xmm3, %xmm3, %xmm5 +# CHECK-NEXT: [0,18] . D--R . . . . . . . vpcmpgtw %xmm3, %xmm3, %xmm5 +# CHECK-NEXT: [0,19] . D--R . . . . . . . vpcmpgtb %ymm3, %ymm3, %ymm3 +# CHECK-NEXT: [0,20] . D--R . . . . . . . vpcmpgtd %ymm3, %ymm3, %ymm3 +# CHECK-NEXT: [0,21] . D--R . . . . . . . 
vpcmpgtq %ymm3, %ymm3, %ymm3 +# CHECK-NEXT: [0,22] . D--R . . . . . . . vpcmpgtw %ymm3, %ymm3, %ymm3 +# CHECK-NEXT: [0,23] . D--R . . . . . . . vpcmpgtb %ymm3, %ymm3, %ymm5 +# CHECK-NEXT: [0,24] . D--R . . . . . . . vpcmpgtd %ymm3, %ymm3, %ymm5 +# CHECK-NEXT: [0,25] . D--R . . . . . . . vpcmpgtq %ymm3, %ymm3, %ymm5 +# CHECK-NEXT: [0,26] . D--R . . . . . . . vpcmpgtw %ymm3, %ymm3, %ymm5 +# CHECK-NEXT: [0,27] . DeER . . . . . . . psubb %mm2, %mm2 +# CHECK-NEXT: [0,28] . D=eER . . . . . . . psubd %mm2, %mm2 +# CHECK-NEXT: [0,29] . D==eER. . . . . . . psubq %mm2, %mm2 +# CHECK-NEXT: [0,30] . D==eER . . . . . . psubw %mm2, %mm2 +# CHECK-NEXT: [0,31] . DeE--R . . . . . . psubb %xmm2, %xmm2 +# CHECK-NEXT: [0,32] . DeE--R . . . . . . psubd %xmm2, %xmm2 +# CHECK-NEXT: [0,33] . DeE--R . . . . . . psubq %xmm2, %xmm2 +# CHECK-NEXT: [0,34] . D=eE-R . . . . . . psubw %xmm2, %xmm2 +# CHECK-NEXT: [0,35] . D----R . . . . . . vpsubb %xmm3, %xmm3, %xmm3 +# CHECK-NEXT: [0,36] . .D---R . . . . . . vpsubd %xmm3, %xmm3, %xmm3 +# CHECK-NEXT: [0,37] . .D---R . . . . . . vpsubq %xmm3, %xmm3, %xmm3 +# CHECK-NEXT: [0,38] . .D---R . . . . . . vpsubw %xmm3, %xmm3, %xmm3 +# CHECK-NEXT: [0,39] . .D----R . . . . . . vpsubb %ymm3, %ymm3, %ymm3 +# CHECK-NEXT: [0,40] . .D----R . . . . . . vpsubd %ymm3, %ymm3, %ymm3 +# CHECK-NEXT: [0,41] . .D----R . . . . . . vpsubq %ymm3, %ymm3, %ymm3 +# CHECK-NEXT: [0,42] . . D---R . . . . . . vpsubw %ymm3, %ymm3, %ymm3 +# CHECK-NEXT: [0,43] . . D---R . . . . . . vpsubb %xmm3, %xmm3, %xmm5 +# CHECK-NEXT: [0,44] . . D---R . . . . . . vpsubd %xmm3, %xmm3, %xmm5 +# CHECK-NEXT: [0,45] . . D---R . . . . . . vpsubq %xmm3, %xmm3, %xmm5 +# CHECK-NEXT: [0,46] . . D---R . . . . . . vpsubw %xmm3, %xmm3, %xmm5 +# CHECK-NEXT: [0,47] . . D---R . . . . . . vpsubb %ymm3, %ymm3, %ymm5 +# CHECK-NEXT: [0,48] . . D---R . . . . . . vpsubd %ymm3, %ymm3, %ymm5 +# CHECK-NEXT: [0,49] . . D---R . . . . . . vpsubq %ymm3, %ymm3, %ymm5 +# CHECK-NEXT: [0,50] . . D---R . . . . . . vpsubw %ymm3, %ymm3, %ymm5 +# CHECK-NEXT: [0,51] . . DeE-R . . . . . . vpsubb %xmm19, %xmm19, %xmm19 +# CHECK-NEXT: [0,52] . . D=eER . . . . . . vpsubd %xmm19, %xmm19, %xmm19 +# CHECK-NEXT: [0,53] . . D==eER . . . . . . vpsubq %xmm19, %xmm19, %xmm19 +# CHECK-NEXT: [0,54] . . D==eER. . . . . . vpsubw %xmm19, %xmm19, %xmm19 +# CHECK-NEXT: [0,55] . . D===eER . . . . . vpsubb %ymm19, %ymm19, %ymm19 +# CHECK-NEXT: [0,56] . . D====eER . . . . . vpsubd %ymm19, %ymm19, %ymm19 +# CHECK-NEXT: [0,57] . . D=====eER . . . . . vpsubq %ymm19, %ymm19, %ymm19 +# CHECK-NEXT: [0,58] . . D======eER . . . . . vpsubw %ymm19, %ymm19, %ymm19 +# CHECK-NEXT: [0,59] . . D=======eER. . . . . vpsubb %zmm19, %zmm19, %zmm19 +# CHECK-NEXT: [0,60] . . D=======eER . . . . vpsubd %zmm19, %zmm19, %zmm19 +# CHECK-NEXT: [0,61] . . D========eER . . . . vpsubq %zmm19, %zmm19, %zmm19 +# CHECK-NEXT: [0,62] . . D=========eER . . . . vpsubw %zmm19, %zmm19, %zmm19 +# CHECK-NEXT: [0,63] . . D==========eER . . . . vpsubb %xmm19, %xmm19, %xmm21 +# CHECK-NEXT: [0,64] . . D===========eER. . . . vpsubd %xmm19, %xmm19, %xmm21 +# CHECK-NEXT: [0,65] . . D===========eER. . . . vpsubq %xmm19, %xmm19, %xmm21 +# CHECK-NEXT: [0,66] . . .D==========eER. . . . vpsubw %xmm19, %xmm19, %xmm21 +# CHECK-NEXT: [0,67] . . .D==========eER. . . . vpsubb %ymm19, %ymm19, %ymm21 +# CHECK-NEXT: [0,68] . . .D===========eER . . . vpsubd %ymm19, %ymm19, %ymm21 +# CHECK-NEXT: [0,69] . . .D===========eER . . . vpsubq %ymm19, %ymm19, %ymm21 +# CHECK-NEXT: [0,70] . . .D===========eER . . . 
vpsubw %ymm19, %ymm19, %ymm21 +# CHECK-NEXT: [0,71] . . .D===========eER . . . vpsubb %zmm19, %zmm19, %zmm21 +# CHECK-NEXT: [0,72] . . . D===========eER . . . vpsubd %zmm19, %zmm19, %zmm21 +# CHECK-NEXT: [0,73] . . . D===========eER . . . vpsubq %zmm19, %zmm19, %zmm21 +# CHECK-NEXT: [0,74] . . . D===========eER . . . vpsubw %zmm19, %zmm19, %zmm21 +# CHECK-NEXT: [0,75] . . . DeE-----------R . . . andnps %xmm0, %xmm0 +# CHECK-NEXT: [0,76] . . . DeE-----------R . . . andnpd %xmm1, %xmm1 +# CHECK-NEXT: [0,77] . . . D-------------R . . . vandnps %xmm2, %xmm2, %xmm2 +# CHECK-NEXT: [0,78] . . . D------------R . . . vandnpd %xmm1, %xmm1, %xmm1 +# CHECK-NEXT: [0,79] . . . D------------R . . . vandnps %ymm2, %ymm2, %ymm2 +# CHECK-NEXT: [0,80] . . . D------------R . . . vandnpd %ymm1, %ymm1, %ymm1 +# CHECK-NEXT: [0,81] . . . DeE-----------R . . . vandnps %zmm2, %zmm2, %zmm2 +# CHECK-NEXT: [0,82] . . . DeE-----------R . . . vandnpd %zmm1, %zmm1, %zmm1 +# CHECK-NEXT: [0,83] . . . DeE-----------R . . . pandn %mm2, %mm2 +# CHECK-NEXT: [0,84] . . . DeE----------R . . . pandn %xmm2, %xmm2 +# CHECK-NEXT: [0,85] . . . D------------R . . . vpandn %xmm3, %xmm3, %xmm3 +# CHECK-NEXT: [0,86] . . . D------------R . . . vpandn %ymm3, %ymm3, %ymm3 +# CHECK-NEXT: [0,87] . . . D==========eER . . . vpandnd %xmm19, %xmm19, %xmm19 +# CHECK-NEXT: [0,88] . . . D===========eER . . . vpandnq %xmm19, %xmm19, %xmm19 +# CHECK-NEXT: [0,89] . . . D============eER. . . vpandnd %ymm19, %ymm19, %ymm19 +# CHECK-NEXT: [0,90] . . . D============eER . . vpandnq %ymm19, %ymm19, %ymm19 +# CHECK-NEXT: [0,91] . . . D=============eER . . vpandnd %zmm19, %zmm19, %zmm19 +# CHECK-NEXT: [0,92] . . . D==============eER . . vpandnq %zmm19, %zmm19, %zmm19 +# CHECK-NEXT: [0,93] . . . D----------------R . . vandnps %xmm2, %xmm2, %xmm5 +# CHECK-NEXT: [0,94] . . . D----------------R . . vandnpd %xmm1, %xmm1, %xmm5 +# CHECK-NEXT: [0,95] . . . D----------------R . . vpandn %xmm3, %xmm3, %xmm5 +# CHECK-NEXT: [0,96] . . . .D---------------R . . vandnps %ymm2, %ymm2, %ymm5 +# CHECK-NEXT: [0,97] . . . .D---------------R . . vandnpd %ymm1, %ymm1, %ymm5 +# CHECK-NEXT: [0,98] . . . .D---------------R . . vpandn %ymm3, %ymm3, %ymm5 +# CHECK-NEXT: [0,99] . . . .DeE-------------R . . vandnps %zmm2, %zmm2, %zmm5 +# CHECK-NEXT: [0,100] . . . .DeE-------------R . . vandnpd %zmm1, %zmm1, %zmm5 +# CHECK-NEXT: [0,101] . . . .D==============eER . . vpandnd %xmm19, %xmm19, %xmm21 +# CHECK-NEXT: [0,102] . . . . D=============eER . . vpandnq %xmm19, %xmm19, %xmm21 +# CHECK-NEXT: [0,103] . . . . D==============eER. . vpandnd %ymm19, %ymm19, %ymm21 +# CHECK-NEXT: [0,104] . . . . D==============eER. . vpandnq %ymm19, %ymm19, %ymm21 +# CHECK-NEXT: [0,105] . . . . D==============eER. . vpandnd %zmm19, %zmm19, %zmm21 +# CHECK-NEXT: [0,106] . . . . D==============eER. . vpandnq %zmm19, %zmm19, %zmm21 +# CHECK-NEXT: [0,107] . . . . D=eE-------------R. . xorps %xmm0, %xmm0 +# CHECK-NEXT: [0,108] . . . . DeE-------------R. . xorpd %xmm1, %xmm1 +# CHECK-NEXT: [0,109] . . . . D---------------R. . vxorps %xmm2, %xmm2, %xmm2 +# CHECK-NEXT: [0,110] . . . . D---------------R. . vxorpd %xmm1, %xmm1, %xmm1 +# CHECK-NEXT: [0,111] . . . . D---------------R. . vxorps %ymm2, %ymm2, %ymm2 +# CHECK-NEXT: [0,112] . . . . D----------------R . vxorpd %ymm1, %ymm1, %ymm1 +# CHECK-NEXT: [0,113] . . . . D=eE-------------R . vxorps %zmm2, %zmm2, %zmm2 +# CHECK-NEXT: [0,114] . . . . DeE-------------R . vxorpd %zmm1, %zmm1, %zmm1 +# CHECK-NEXT: [0,115] . . . . D======eE-------R . 
pxor %mm2, %mm2 +# CHECK-NEXT: [0,116] . . . . D======eE-------R . pxor %xmm2, %xmm2 +# CHECK-NEXT: [0,117] . . . . D---------------R . vpxor %xmm3, %xmm3, %xmm3 +# CHECK-NEXT: [0,118] . . . . D---------------R . vpxor %ymm3, %ymm3, %ymm3 +# CHECK-NEXT: [0,119] . . . . D=============eER . vpxord %xmm19, %xmm19, %xmm19 +# CHECK-NEXT: [0,120] . . . . D=============eER . vpxorq %xmm19, %xmm19, %xmm19 +# CHECK-NEXT: [0,121] . . . . D==============eER . vpxord %ymm19, %ymm19, %ymm19 +# CHECK-NEXT: [0,122] . . . . D===============eER. vpxorq %ymm19, %ymm19, %ymm19 +# CHECK-NEXT: [0,123] . . . . D-----------------R. vpxord %zmm19, %zmm19, %zmm19 +# CHECK-NEXT: [0,124] . . . . D=====eE----------R. vpxorq %zmm19, %zmm19, %zmm19 +# CHECK-NEXT: [0,125] . . . . D-----------------R. vxorps %xmm4, %xmm4, %xmm5 +# CHECK-NEXT: [0,126] . . . . .D----------------R. vxorpd %xmm1, %xmm1, %xmm3 +# CHECK-NEXT: [0,127] . . . . .D----------------R. vxorps %ymm4, %ymm4, %ymm5 +# CHECK-NEXT: [0,128] . . . . .D----------------R. vxorpd %ymm1, %ymm1, %ymm3 +# CHECK-NEXT: [0,129] . . . . .D=====eE---------R. vxorps %zmm4, %zmm4, %zmm5 +# CHECK-NEXT: [0,130] . . . . .D=====eE---------R. vxorpd %zmm1, %zmm1, %zmm3 +# CHECK-NEXT: [0,131] . . . . .D-----------------R vpxor %xmm3, %xmm3, %xmm5 +# CHECK-NEXT: [0,132] . . . . . D----------------R vpxor %ymm3, %ymm3, %ymm5 +# CHECK-NEXT: [0,133] . . . . . D=====eE---------R vpxord %xmm19, %xmm19, %xmm21 +# CHECK-NEXT: [0,134] . . . . . D======eE--------R vpxorq %xmm19, %xmm19, %xmm21 +# CHECK-NEXT: [0,135] . . . . . D======eE--------R vpxord %ymm19, %ymm19, %ymm21 +# CHECK-NEXT: [0,136] . . . . . D======eE--------R vpxorq %ymm19, %ymm19, %ymm21 +# CHECK-NEXT: [0,137] . . . . . D----------------R vpxord %zmm19, %zmm19, %zmm21 +# CHECK-NEXT: [0,138] . . . . . D======eE-------R vpxorq %zmm19, %zmm19, %zmm21 # CHECK: Average Wait times (based on the timeline view): # CHECK-NEXT: [0]: Executions @@ -757,7 +757,7 @@ vpxorq %zmm19, %zmm19, %zmm21 # CHECK-NEXT: 100. 1 1.0 1.0 13.0 vandnpd %zmm1, %zmm1, %zmm5 # CHECK-NEXT: 101. 1 15.0 0.0 0.0 vpandnd %xmm19, %xmm19, %xmm21 # CHECK-NEXT: 102. 1 14.0 0.0 0.0 vpandnq %xmm19, %xmm19, %xmm21 -# CHECK-NEXT: 103. 1 14.0 0.0 0.0 vpandnd %ymm19, %ymm19, %ymm21 +# CHECK-NEXT: 103. 1 15.0 1.0 0.0 vpandnd %ymm19, %ymm19, %ymm21 # CHECK-NEXT: 104. 1 15.0 1.0 0.0 vpandnq %ymm19, %ymm19, %ymm21 # CHECK-NEXT: 105. 1 15.0 1.0 0.0 vpandnd %zmm19, %zmm19, %zmm21 # CHECK-NEXT: 106. 1 15.0 1.0 0.0 vpandnq %zmm19, %zmm19, %zmm21 @@ -766,31 +766,31 @@ vpxorq %zmm19, %zmm19, %zmm21 # CHECK-NEXT: 109. 1 0.0 0.0 15.0 vxorps %xmm2, %xmm2, %xmm2 # CHECK-NEXT: 110. 1 0.0 0.0 15.0 vxorpd %xmm1, %xmm1, %xmm1 # CHECK-NEXT: 111. 1 0.0 0.0 15.0 vxorps %ymm2, %ymm2, %ymm2 -# CHECK-NEXT: 112. 1 0.0 0.0 15.0 vxorpd %ymm1, %ymm1, %ymm1 +# CHECK-NEXT: 112. 1 0.0 0.0 16.0 vxorpd %ymm1, %ymm1, %ymm1 # CHECK-NEXT: 113. 1 2.0 2.0 13.0 vxorps %zmm2, %zmm2, %zmm2 # CHECK-NEXT: 114. 1 1.0 1.0 13.0 vxorpd %zmm1, %zmm1, %zmm1 # CHECK-NEXT: 115. 1 7.0 7.0 7.0 pxor %mm2, %mm2 # CHECK-NEXT: 116. 1 7.0 7.0 7.0 pxor %xmm2, %xmm2 # CHECK-NEXT: 117. 1 0.0 0.0 15.0 vpxor %xmm3, %xmm3, %xmm3 # CHECK-NEXT: 118. 1 0.0 0.0 15.0 vpxor %ymm3, %ymm3, %ymm3 -# CHECK-NEXT: 119. 1 13.0 1.0 1.0 vpxord %xmm19, %xmm19, %xmm19 -# CHECK-NEXT: 120. 1 13.0 0.0 0.0 vpxorq %xmm19, %xmm19, %xmm19 -# CHECK-NEXT: 121. 1 14.0 0.0 0.0 vpxord %ymm19, %ymm19, %ymm19 -# CHECK-NEXT: 122. 1 15.0 0.0 0.0 vpxorq %ymm19, %ymm19, %ymm19 -# CHECK-NEXT: 123. 1 16.0 0.0 0.0 vpxord %zmm19, %zmm19, %zmm19 -# CHECK-NEXT: 124. 
1 17.0 0.0 0.0 vpxorq %zmm19, %zmm19, %zmm19 -# CHECK-NEXT: 125. 1 0.0 0.0 18.0 vxorps %xmm4, %xmm4, %xmm5 -# CHECK-NEXT: 126. 1 0.0 0.0 17.0 vxorpd %xmm1, %xmm1, %xmm3 -# CHECK-NEXT: 127. 1 0.0 0.0 17.0 vxorps %ymm4, %ymm4, %ymm5 -# CHECK-NEXT: 128. 1 0.0 0.0 17.0 vxorpd %ymm1, %ymm1, %ymm3 -# CHECK-NEXT: 129. 1 5.0 5.0 11.0 vxorps %zmm4, %zmm4, %zmm5 -# CHECK-NEXT: 130. 1 6.0 6.0 10.0 vxorpd %zmm1, %zmm1, %zmm3 +# CHECK-NEXT: 119. 1 14.0 2.0 0.0 vpxord %xmm19, %xmm19, %xmm19 +# CHECK-NEXT: 120. 1 14.0 0.0 0.0 vpxorq %xmm19, %xmm19, %xmm19 +# CHECK-NEXT: 121. 1 15.0 0.0 0.0 vpxord %ymm19, %ymm19, %ymm19 +# CHECK-NEXT: 122. 1 16.0 0.0 0.0 vpxorq %ymm19, %ymm19, %ymm19 +# CHECK-NEXT: 123. 1 0.0 0.0 17.0 vpxord %zmm19, %zmm19, %zmm19 +# CHECK-NEXT: 124. 1 6.0 6.0 10.0 vpxorq %zmm19, %zmm19, %zmm19 +# CHECK-NEXT: 125. 1 0.0 0.0 17.0 vxorps %xmm4, %xmm4, %xmm5 +# CHECK-NEXT: 126. 1 0.0 0.0 16.0 vxorpd %xmm1, %xmm1, %xmm3 +# CHECK-NEXT: 127. 1 0.0 0.0 16.0 vxorps %ymm4, %ymm4, %ymm5 +# CHECK-NEXT: 128. 1 0.0 0.0 16.0 vxorpd %ymm1, %ymm1, %ymm3 +# CHECK-NEXT: 129. 1 6.0 6.0 9.0 vxorps %zmm4, %zmm4, %zmm5 +# CHECK-NEXT: 130. 1 6.0 6.0 9.0 vxorpd %zmm1, %zmm1, %zmm3 # CHECK-NEXT: 131. 1 0.0 0.0 17.0 vpxor %xmm3, %xmm3, %xmm5 # CHECK-NEXT: 132. 1 0.0 0.0 16.0 vpxor %ymm3, %ymm3, %ymm5 -# CHECK-NEXT: 133. 1 16.0 0.0 0.0 vpxord %xmm19, %xmm19, %xmm21 -# CHECK-NEXT: 134. 1 16.0 0.0 0.0 vpxorq %xmm19, %xmm19, %xmm21 -# CHECK-NEXT: 135. 1 16.0 0.0 0.0 vpxord %ymm19, %ymm19, %ymm21 -# CHECK-NEXT: 136. 1 17.0 1.0 0.0 vpxorq %ymm19, %ymm19, %ymm21 -# CHECK-NEXT: 137. 1 17.0 1.0 0.0 vpxord %zmm19, %zmm19, %zmm21 -# CHECK-NEXT: 138. 1 16.0 1.0 0.0 vpxorq %zmm19, %zmm19, %zmm21 -# CHECK-NEXT: 1 4.5 0.6 4.6 <total> +# CHECK-NEXT: 133. 1 6.0 1.0 9.0 vpxord %xmm19, %xmm19, %xmm21 +# CHECK-NEXT: 134. 1 7.0 2.0 8.0 vpxorq %xmm19, %xmm19, %xmm21 +# CHECK-NEXT: 135. 1 7.0 2.0 8.0 vpxord %ymm19, %ymm19, %ymm21 +# CHECK-NEXT: 136. 1 7.0 2.0 8.0 vpxorq %ymm19, %ymm19, %ymm21 +# CHECK-NEXT: 137. 1 0.0 0.0 16.0 vpxord %zmm19, %zmm19, %zmm21 +# CHECK-NEXT: 138. 1 7.0 3.0 7.0 vpxorq %zmm19, %zmm19, %zmm21 +# CHECK-NEXT: 1 3.9 0.7 5.1 <total> diff --git a/llvm/tools/dsymutil/Options.td b/llvm/tools/dsymutil/Options.td index b72ae19..67d3675 100644 --- a/llvm/tools/dsymutil/Options.td +++ b/llvm/tools/dsymutil/Options.td @@ -129,18 +129,18 @@ def: Joined<["--", "-"], "oso-prepend-path=">, Alias<oso_prepend_path>; def object_prefix_map: Separate<["--", "-"], "object-prefix-map">, MetaVarName<"<prefix=remapped>">, - HelpText<"Remap object file paths (but no source paths) before processing." - "Use this for Clang objects where the module cache location was" - "remapped using -fdebug-prefix-map; to help dsymutil" + HelpText<"Remap object file paths (but no source paths) before processing. " + "Use this for Clang objects where the module cache location was " + "remapped using -fdebug-prefix-map; to help dsymutil " "find the Clang module cache.">, Group<grp_general>; def: Joined<["--", "-"], "object-prefix-map=">, Alias<object_prefix_map>; def arch: Separate<["--", "-"], "arch">, MetaVarName<"<arch>">, - HelpText<"Link DWARF debug information only for specified CPU architecture" - "types. This option can be specified multiple times, once for each" - "desired architecture. All CPU architectures will be linked by" + HelpText<"Link DWARF debug information only for specified CPU architecture " + "types. This option can be specified multiple times, once for each " + "desired architecture. 
All CPU architectures will be linked by " "default.">, Group<grp_general>; def: Joined<["--", "-"], "arch=">, Alias<arch>; @@ -206,7 +206,7 @@ def: Joined<["--", "-"], "linker=">, Alias<linker>; def build_variant_suffix: Separate<["--", "-"], "build-variant-suffix">, MetaVarName<"<suffix=buildvariant>">, - HelpText<"Specify the build variant suffix used to build the executabe file.">, + HelpText<"Specify the build variant suffix used to build the executable file.">, Group<grp_general>; def: Joined<["--", "-"], "build-variant-suffix=">, Alias<build_variant_suffix>; diff --git a/llvm/unittests/Support/FormatVariadicTest.cpp b/llvm/unittests/Support/FormatVariadicTest.cpp index 4f3d179..6893848 100644 --- a/llvm/unittests/Support/FormatVariadicTest.cpp +++ b/llvm/unittests/Support/FormatVariadicTest.cpp @@ -269,7 +269,7 @@ TEST(FormatVariadicTest, MultipleReplacements) { EXPECT_EQ(ReplacementType::Literal, Replacements[3].Type); EXPECT_EQ("-", Replacements[3].Spec); - // {2:bar,-3} - Options=bar, Align=-3 + // {2,-3:bar} - Options=bar, Align=-3 EXPECT_EQ(ReplacementType::Format, Replacements[4].Type); EXPECT_EQ(2u, Replacements[4].Index); EXPECT_EQ(3u, Replacements[4].Width); @@ -277,6 +277,42 @@ TEST(FormatVariadicTest, MultipleReplacements) { EXPECT_EQ("bar", Replacements[4].Options); } +TEST(FormatVariadicTest, AutomaticIndices) { + auto Replacements = parseFormatString("{}"); + ASSERT_EQ(1u, Replacements.size()); + EXPECT_EQ(ReplacementType::Format, Replacements[0].Type); + EXPECT_EQ(0u, Replacements[0].Index); + + Replacements = parseFormatString("{}{}"); + ASSERT_EQ(2u, Replacements.size()); + EXPECT_EQ(ReplacementType::Format, Replacements[0].Type); + EXPECT_EQ(0u, Replacements[0].Index); + EXPECT_EQ(ReplacementType::Format, Replacements[1].Type); + EXPECT_EQ(1u, Replacements[1].Index); + + Replacements = parseFormatString("{}{:foo}{,-3:bar}"); + ASSERT_EQ(3u, Replacements.size()); + EXPECT_EQ(ReplacementType::Format, Replacements[0].Type); + EXPECT_EQ(0u, Replacements[0].Index); + EXPECT_EQ(0u, Replacements[0].Width); + EXPECT_EQ(AlignStyle::Right, Replacements[0].Where); + EXPECT_EQ("", Replacements[0].Options); + + // {:foo} - Options=foo + EXPECT_EQ(ReplacementType::Format, Replacements[1].Type); + EXPECT_EQ(1u, Replacements[1].Index); + EXPECT_EQ(0u, Replacements[1].Width); + EXPECT_EQ(AlignStyle::Right, Replacements[1].Where); + EXPECT_EQ("foo", Replacements[1].Options); + + // {,-3:bar} - Options=bar, Align=-3 + EXPECT_EQ(ReplacementType::Format, Replacements[2].Type); + EXPECT_EQ(2u, Replacements[2].Index); + EXPECT_EQ(3u, Replacements[2].Width); + EXPECT_EQ(AlignStyle::Left, Replacements[2].Where); + EXPECT_EQ("bar", Replacements[2].Options); +} + TEST(FormatVariadicTest, FormatNoReplacements) { EXPECT_EQ("", formatv("").str()); EXPECT_EQ("Test", formatv("Test").str()); @@ -291,6 +327,12 @@ TEST(FormatVariadicTest, FormatBasicTypesOneReplacement) { EXPECT_EQ("Test3", formatv("{0}", std::string("Test3")).str()); } +TEST(FormatVariadicTest, FormatAutomaticIndices) { + EXPECT_EQ("1", formatv("{}", 1).str()); + EXPECT_EQ("c1", formatv("{}{}", 'c', 1).str()); + EXPECT_EQ("c-1rrr-0xFF", formatv("{}-{,r-4}-{:X}", 'c', 1, 255).str()); +} + TEST(FormatVariadicTest, IntegralHexFormatting) { // 1. Trivial cases. Make sure hex is not the default. 
EXPECT_EQ("0", formatv("{0}", 0).str()); @@ -717,6 +759,8 @@ TEST(FormatVariadicTest, Validate) { EXPECT_DEATH(formatv("{0}", 1, 2).str(), "Expected 1 Args, but got 2"); EXPECT_DEATH(formatv("{0} {2}", 1, 2, 3).str(), "Replacement field indices cannot have holes"); + EXPECT_DEATH(formatv("{}{1}", 0, 1).str(), + "Cannot mix automatic and explicit indices"); #else // GTEST_HAS_DEATH_TEST GTEST_SKIP() << "No support for EXPECT_DEATH"; #endif // GTEST_HAS_DEATH_TEST @@ -724,6 +768,7 @@ TEST(FormatVariadicTest, Validate) { // If asserts are disabled, verify that validation is disabled. EXPECT_EQ(formatv("{0}", 1, 2).str(), "1"); EXPECT_EQ(formatv("{0} {2}", 1, 2, 3).str(), "1 3"); + EXPECT_EQ(formatv("{}{1}", 0, 1).str(), "01"); #endif // NDEBUG } diff --git a/llvm/unittests/Target/LoongArch/InstSizes.cpp b/llvm/unittests/Target/LoongArch/InstSizes.cpp index da78012..2a0e9e20 100644 --- a/llvm/unittests/Target/LoongArch/InstSizes.cpp +++ b/llvm/unittests/Target/LoongArch/InstSizes.cpp @@ -140,3 +140,18 @@ TEST(InstSizes, AtomicPseudo) { EXPECT_EQ(44u, II.getInstSizeInBytes(*I)); }); } + +TEST(InstSizes, StatePoint) { + std::unique_ptr<LLVMTargetMachine> TM = createTargetMachine(); + std::unique_ptr<LoongArchInstrInfo> II = createInstrInfo(TM.get()); + + runChecks( + TM.get(), II.get(), " declare zeroext i1 @return_i1()\n", + // clang-format off + " STATEPOINT 0, 0, 0, target-flags(loongarch-call-plt) @return_i1, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, implicit-def $r3, implicit-def $r4\n", + // clang-format on + [](LoongArchInstrInfo &II, MachineFunction &MF) { + auto I = MF.begin()->begin(); + EXPECT_EQ(4u, II.getInstSizeInBytes(*I)); + }); +} diff --git a/llvm/utils/gn/build/BUILD.gn b/llvm/utils/gn/build/BUILD.gn index 1ccaefd..27f95bb 100644 --- a/llvm/utils/gn/build/BUILD.gn +++ b/llvm/utils/gn/build/BUILD.gn @@ -183,9 +183,11 @@ config("compiler_defaults") { cflags += [ "/EHs-c-" ] cflags_cc += [ "/std:c++17" ] - # cl.exe doesn't set __cplusplus correctly by default. - # clang-cl gets it right by default, so don't needlessly add the flag there. if (!is_clang) { + # expand __VA_ARGS__ in "OPTION(...) LLVM_MAKE_OPT_ID(__VA_ARGS__)" + cflags += [ "/Zc:preprocessor" ] + # cl.exe doesn't set __cplusplus correctly by default. + # clang-cl gets it right by default, so don't needlessly add the flag there. 
cflags_cc += [ "/Zc:__cplusplus" ] } diff --git a/llvm/utils/gn/build/toolchain/target_flags.gni b/llvm/utils/gn/build/toolchain/target_flags.gni index af8adcd..cbfa229 100644 --- a/llvm/utils/gn/build/toolchain/target_flags.gni +++ b/llvm/utils/gn/build/toolchain/target_flags.gni @@ -54,6 +54,6 @@ if (current_os == "android") { target_flags += [ "--target=$llvm_current_triple" ] } -if (current_cpu == "x86") { +if (current_cpu == "x86" && current_os != "win") { target_flags += [ "-m32" ] } diff --git a/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn index f355321..43452b4 100644 --- a/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn +++ b/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn @@ -138,11 +138,13 @@ copy("Headers") { "avx10_2_512convertintrin.h", "avx10_2_512minmaxintrin.h", "avx10_2_512niintrin.h", + "avx10_2_512satcvtdsintrin.h", "avx10_2_512satcvtintrin.h", "avx10_2bf16intrin.h", "avx10_2convertintrin.h", "avx10_2minmaxintrin.h", "avx10_2niintrin.h", + "avx10_2satcvtdsintrin.h", "avx10_2satcvtintrin.h", "avx2intrin.h", "avx512bf16intrin.h", diff --git a/llvm/utils/gn/secondary/compiler-rt/lib/BUILD.gn b/llvm/utils/gn/secondary/compiler-rt/lib/BUILD.gn index d8c75a0..02c2048 100644 --- a/llvm/utils/gn/secondary/compiler-rt/lib/BUILD.gn +++ b/llvm/utils/gn/secondary/compiler-rt/lib/BUILD.gn @@ -10,16 +10,16 @@ group("lib") { if (current_os == "linux" || current_os == "android") { deps += [ "//compiler-rt/lib/ubsan_minimal" ] } - if (current_os != "win" && current_os != "baremetal") { + if (current_os != "baremetal") { deps += [ "//compiler-rt/lib/asan", - "//compiler-rt/lib/ubsan", + "//compiler-rt/lib/profile", ] + } + if (current_os != "win" && current_os != "baremetal") { + deps += [ "//compiler-rt/lib/ubsan" ] if (current_cpu == "x64" || current_cpu == "arm64") { deps += [ "//compiler-rt/lib/tsan/rtl" ] } } - if (current_os != "baremetal") { - deps += [ "//compiler-rt/lib/profile" ] - } } diff --git a/llvm/utils/gn/secondary/compiler-rt/lib/asan/BUILD.gn b/llvm/utils/gn/secondary/compiler-rt/lib/asan/BUILD.gn index cf30875..42467c2 100644 --- a/llvm/utils/gn/secondary/compiler-rt/lib/asan/BUILD.gn +++ b/llvm/utils/gn/secondary/compiler-rt/lib/asan/BUILD.gn @@ -60,12 +60,12 @@ if (current_toolchain == host_toolchain) { "asan_thread.h", "asan_win.cpp", ] - if (target_os != "mac" && target_os != "win") { + if (current_os != "mac" && current_os != "win") { asan_sources += [ "asan_interceptors_vfork.S" ] } config("asan_config") { cflags = [] - if (target_os != "win") { + if (current_os != "win") { cflags += [ "-ftls-model=initial-exec" ] } else { ldflags = [ "/OPT:NOICF" ] @@ -76,11 +76,11 @@ if (current_toolchain == host_toolchain) { # FIXME: add_sanitizer_rt_version_list (cf hwasan) # FIXME: need libclang_rt.asan*.a.syms? - if (target_os == "android") { + if (current_os == "android") { ldflags = [ "-Wl,-z,global" ] } - if (target_os == "mac") { + if (current_os == "mac") { # The -U flags below correspond to the add_weak_symbols() calls in CMake. 
ldflags = [ "-lc++", @@ -145,7 +145,7 @@ if (current_toolchain == host_toolchain) { configs -= [ "//llvm/utils/gn/build:llvm_code" ] configs += [ "//llvm/utils/gn/build:crt_code" ] sources = [ "asan_rtl_static.cpp" ] - if (target_os != "mac" && target_os != "win") { + if (current_os != "mac" && current_os != "win") { sources += [ "asan_rtl_x86_64.S" ] } } @@ -183,7 +183,7 @@ if (current_toolchain == host_toolchain) { } } - if (current_os != "mac") { + if (current_os != "mac" && current_os != "win") { static_library("asan_static_library") { output_dir = crt_current_out_dir output_name = "clang_rt.asan$crt_current_target_suffix" @@ -232,7 +232,8 @@ if (current_toolchain == host_toolchain) { if (current_os == "win") { static_library("asan_static_runtime_thunk") { output_dir = crt_current_out_dir - output_name = "clang_rt.asan_static_runtime_thunk$crt_current_target_suffix" + output_name = + "clang_rt.asan_static_runtime_thunk$crt_current_target_suffix" configs -= [ "//llvm/utils/gn/build:llvm_code" ] configs += [ "//llvm/utils/gn/build:crt_code" ] complete_static_lib = true @@ -277,11 +278,11 @@ if (current_toolchain == host_toolchain) { deps = [ ":asan_shared_library" ] if (current_os == "win") { deps += [ - ":asan_static_runtime_thunk", ":asan_dynamic_runtime_thunk", + ":asan_static_runtime_thunk", ] } - if (current_os != "mac") { + if (current_os != "mac" && current_os != "win") { deps += [ ":asan_cxx", ":asan_preinit", diff --git a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn index c161947..b907e66 100644 --- a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn +++ b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn @@ -996,7 +996,6 @@ if (current_toolchain == default_toolchain) { "exception", "execution", "expected", - "experimental/__config", "experimental/__simd/aligned_tag.h", "experimental/__simd/declaration.h", "experimental/__simd/reference.h", diff --git a/llvm/utils/release/build_llvm_release.bat b/llvm/utils/release/build_llvm_release.bat index 3508748..dd041d7d 100755 --- a/llvm/utils/release/build_llvm_release.bat +++ b/llvm/utils/release/build_llvm_release.bat @@ -80,7 +80,6 @@ REM Prerequisites: REM
REM Visual Studio 2019, CMake, Ninja, GNUWin32, SWIG, Python 3,
REM NSIS with the strlen_8192 patch,
-REM Visual Studio 2019 SDK and Nuget (for the clang-format plugin),
REM Perl (for the OpenMP run-time).
REM
REM
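Aside for reviewers: the FormatVariadicTest.cpp hunk above exercises the new automatic replacement-field indices in `llvm::formatv`. Below is a minimal, self-contained sketch of the behavior; every expected string is copied from the added unit tests, and the rest is illustrative scaffolding, not part of the patch.

```cpp
// Demonstrates automatic "{}" indices in llvm::formatv (assumes a build of
// LLVM that includes the FormatVariadic change above).
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  // "{}" placeholders are numbered left to right: 0, 1, 2, ...
  llvm::outs() << llvm::formatv("{}{}", 'c', 1) << "\n"; // prints "c1"

  // Width, alignment, and style options still work without an explicit
  // index: {,r-4} left-justifies to width 4 with 'r' as the fill character,
  // and {:X} prints uppercase hex.
  llvm::outs() << llvm::formatv("{}-{,r-4}-{:X}", 'c', 1, 255) << "\n";
  // prints "c-1rrr-0xFF"
  return 0;
}
```

Mixing the two styles in one format string, e.g. `formatv("{}{1}", 0, 1)`, asserts in builds with assertions enabled ("Cannot mix automatic and explicit indices") and only formats as "01" when validation is compiled out, per the Validate test above.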
diff --git a/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td b/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td index 1ec8227..cba35bb 100644 --- a/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td +++ b/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td @@ -214,7 +214,7 @@ def AMDGPU_RawBufferAtomicCmpswapOp : AttrSizedOperandSegments, AllTypesMatch<["src", "cmp", "value"]>, AllElementTypesMatch<["value", "memref"]>]>, - Arguments<(ins AnyTypeOf<[I32, I64, F32, F64]>:$src, + Arguments<(ins AnyType:$src, AnyType:$cmp, Arg<AnyMemRef, "buffer to operate on", [MemRead, MemWrite]>:$memref, Variadic<I32>:$indices, diff --git a/mlir/include/mlir/Dialect/AMDGPU/Transforms/Passes.td b/mlir/include/mlir/Dialect/AMDGPU/Transforms/Passes.td index e6b27aa..23f8b8f 100644 --- a/mlir/include/mlir/Dialect/AMDGPU/Transforms/Passes.td +++ b/mlir/include/mlir/Dialect/AMDGPU/Transforms/Passes.td @@ -24,6 +24,7 @@ def AmdgpuEmulateAtomicsPass : Pass<"amdgpu-emulate-atomics"> { let dependentDialects = [ "cf::ControlFlowDialect", "arith::ArithDialect", + "vector::VectorDialect" ]; let options = [Option<"chipset", "chipset", "std::string", /*default=*/"\"gfx000\"", diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td index 49e54df..2da45eb 100644 --- a/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td +++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td @@ -1035,6 +1035,40 @@ def LLVM_TBAATagArrayAttr } //===----------------------------------------------------------------------===// +// ConstantRangeAttr +//===----------------------------------------------------------------------===// +def LLVM_ConstantRangeAttr : LLVM_Attr<"ConstantRange", "constant_range"> { + let parameters = (ins + "::llvm::APInt":$lower, + "::llvm::APInt":$upper + ); + let summary = "A range of two integers, corresponding to LLVM's ConstantRange"; + let description = [{ + A pair of two integers, mapping to the ConstantRange structure in LLVM IR, + which is allowed to wrap or be empty. + + The range represented is [Lower, Upper), and is either signed or unsigned + depending on context. + + `lower` and `upper` must have the same width. 
+
+    Syntax:
+    ```
+    `<` `i`(width($lower)) $lower `,` $upper `>`
+    ```
+  }];
+
+  let builders = [
+    AttrBuilder<(ins "uint32_t":$bitWidth, "int64_t":$lower, "int64_t":$upper), [{
+      return $_get($_ctxt, ::llvm::APInt(bitWidth, lower), ::llvm::APInt(bitWidth, upper));
+    }]>
+  ];
+
+  let hasCustomAssemblyFormat = 1;
+  let genVerifyDecl = 1;
+}
+
+
+//===----------------------------------------------------------------------===//
 // VScaleRangeAttr
 //===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
index 7b9a9cf..c3d352d8 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
@@ -319,17 +319,19 @@ class LLVM_IntrOpBase<Dialect dialect, string opName, string enumName,
   string immArgPositionsCpp = "{" # !interleave(immArgPositions, ", ") # "}";
   string immArgAttrNamesCpp = "{" # !interleave(!foreach(name, immArgAttrNames,
       "StringLiteral(\"" # name # "\")"), ", ") # "}";
-  let llvmBuilder = [{
+  string baseLlvmBuilder = [{
     auto *inst = LLVM::detail::createIntrinsicCall(
       builder, moduleTranslation, &opInst, llvm::Intrinsic::}] # !interleave([
         enumName, "" # numResults, overloadedResultsCpp, overloadedOperandsCpp,
         immArgPositionsCpp, immArgAttrNamesCpp], ",") # [{);
     (void) inst;
-  }] # !if(!gt(requiresAccessGroup, 0), setAccessGroupsMetadataCode, "")
+  }];
+  string baseLlvmBuilderCoda = !if(!gt(numResults, 0), "$res = inst;", "");
+  let llvmBuilder = baseLlvmBuilder # !if(!gt(requiresAccessGroup, 0), setAccessGroupsMetadataCode, "")
     # !if(!gt(requiresAliasAnalysis, 0), setAliasAnalysisMetadataCode, "")
-    # !if(!gt(numResults, 0), "$res = inst;", "");
+    # baseLlvmBuilderCoda;

-  string mlirBuilder = [{
+  string baseMlirBuilder = [{
     SmallVector<Value> mlirOperands;
     SmallVector<NamedAttribute> mlirAttrs;
     if (failed(moduleImport.convertIntrinsicArguments(
@@ -345,9 +347,32 @@ class LLVM_IntrOpBase<Dialect dialect, string opName, string enumName,
     }] # !if(!gt(numResults, 0), "{$_resultType};", "{};") # [{
     auto op = $_builder.create<$_qualCppClassName>(
         $_location, resultTypes, mlirOperands, mlirAttrs);
-    }] # !if(!gt(requiresFastmath, 0),
+    }];
+  string baseMlirBuilderCoda = !if(!gt(numResults, 0), "$res = op;", "$_op = op;");
+  let mlirBuilder = baseMlirBuilder # !if(!gt(requiresFastmath, 0),
       "moduleImport.setFastmathFlagsAttr(inst, op);", "")
-      # !if(!gt(numResults, 0), "$res = op;", "$_op = op;");
+      # baseMlirBuilderCoda;
+
+  // Code for handling a `range` attribute that holds the constant range of the
+  // intrinsic's result (if one is specified at the call site). This is intended
+  // for GPU IDs and other calls where range() is meaningful. It expects
+  // an optional LLVM_ConstantRangeAttr named `range` to be present on the
+  // operation. These are included to abstract out common code in several
+  // dialects.
+  string setRangeRetAttrCode = [{
+    if ($range) {
+      inst->addRangeRetAttr(::llvm::ConstantRange(
+          $range->getLower(), $range->getUpper()));
+    }
+  }];
+  string importRangeRetAttrCode = [{
+    // Note: we don't want to look into the declaration here.
+    auto rangeAttr = inst->getAttributes().getRetAttr(::llvm::Attribute::Range);
+    if (rangeAttr.isValid()) {
+      const ::llvm::ConstantRange& value = rangeAttr.getValueAsConstantRange();
+      op.setRangeAttr(::mlir::LLVM::ConstantRangeAttr::get($_builder.getContext(), value.getLower(), value.getUpper()));
+    }
+  }];
 }

 // Base class for LLVM intrinsic operations, should not be used directly. Places
diff --git a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
index 35fd827..de23246 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/ROCDLOps.td
@@ -98,23 +98,36 @@ class ROCDL_IntrOp<string mnemonic, list<int> overloadedResults,
 // ROCDL special register op definitions
 //===----------------------------------------------------------------------===//

-class ROCDL_SpecialRegisterOp<string mnemonic,
-    list<Trait> traits = []> :
-  ROCDL_Op<mnemonic, !listconcat(traits, [Pure])>,
-  Results<(outs LLVM_Type:$res)>, Arguments<(ins)> {
-  string llvmBuilder = "$res = createIntrinsicCallWithRange(builder,"
-    # "llvm::Intrinsic::amdgcn_" # !subst(".","_", mnemonic)
-    # ", op->getAttrOfType<::mlir::DenseI32ArrayAttr>(\"range\"));";
-  let assemblyFormat = "attr-dict `:` type($res)";
+class ROCDL_SpecialIdRegisterOp<string mnemonic> :
+  ROCDL_IntrPure1Op<mnemonic>,
+  Arguments<(ins OptionalAttr<LLVM_ConstantRangeAttr>:$range)> {
+  string llvmBuilder = baseLlvmBuilder # setRangeRetAttrCode # baseLlvmBuilderCoda;
+  string mlirBuilder = baseMlirBuilder # importRangeRetAttrCode # baseMlirBuilderCoda;
+
+  let assemblyFormat = "(`range` $range^)? attr-dict `:` type($res)";
+
+  // Temporary builder until Nvidia ops also support range attributes.
+  let builders = [
+    OpBuilder<(ins "Type":$resultType), [{
+      build($_builder, $_state, resultType, ::mlir::LLVM::ConstantRangeAttr{});
+    }]>
+  ];
 }

-class ROCDL_DeviceFunctionOp<string mnemonic, string device_function,
+class ROCDL_DimGetterFunctionOp<string mnemonic, string device_function,
     int parameter, list<Trait> traits = []> :
   ROCDL_Op<mnemonic, !listconcat(traits, [Pure])>,
-  Results<(outs LLVM_Type:$res)>, Arguments<(ins)> {
-  string llvmBuilder = "$res = createDeviceFunctionCall(builder, \""
+  Results<(outs LLVM_Type:$res)>, Arguments<(ins OptionalAttr<LLVM_ConstantRangeAttr>:$range)> {
+  string llvmBuilder = "$res = createDimGetterFunctionCall(builder, op, \""
     # device_function # "\", " # parameter # ");";
-  let assemblyFormat = "attr-dict `:` type($res)";
+  let assemblyFormat = "(`range` $range^)? attr-dict `:` type($res)";
+
+  // Temporary builder until Nvidia ops also support range attributes.
+ let builders = [ + OpBuilder<(ins "Type":$resultType), [{ + build($_builder, $_state, resultType, ::mlir::LLVM::ConstantRangeAttr{}); + }]> + ]; } //===----------------------------------------------------------------------===// @@ -181,33 +194,33 @@ def ROCDL_BallotOp : //===----------------------------------------------------------------------===// // Thread index and Block index -def ROCDL_ThreadIdXOp : ROCDL_SpecialRegisterOp<"workitem.id.x">; -def ROCDL_ThreadIdYOp : ROCDL_SpecialRegisterOp<"workitem.id.y">; -def ROCDL_ThreadIdZOp : ROCDL_SpecialRegisterOp<"workitem.id.z">; +def ROCDL_ThreadIdXOp : ROCDL_SpecialIdRegisterOp<"workitem.id.x">; +def ROCDL_ThreadIdYOp : ROCDL_SpecialIdRegisterOp<"workitem.id.y">; +def ROCDL_ThreadIdZOp : ROCDL_SpecialIdRegisterOp<"workitem.id.z">; -def ROCDL_BlockIdXOp : ROCDL_SpecialRegisterOp<"workgroup.id.x">; -def ROCDL_BlockIdYOp : ROCDL_SpecialRegisterOp<"workgroup.id.y">; -def ROCDL_BlockIdZOp : ROCDL_SpecialRegisterOp<"workgroup.id.z">; +def ROCDL_BlockIdXOp : ROCDL_SpecialIdRegisterOp<"workgroup.id.x">; +def ROCDL_BlockIdYOp : ROCDL_SpecialIdRegisterOp<"workgroup.id.y">; +def ROCDL_BlockIdZOp : ROCDL_SpecialIdRegisterOp<"workgroup.id.z">; //===----------------------------------------------------------------------===// // Thread range and Block range -def ROCDL_BlockDimXOp : ROCDL_DeviceFunctionOp<"workgroup.dim.x", +def ROCDL_BlockDimXOp : ROCDL_DimGetterFunctionOp<"workgroup.dim.x", "__ockl_get_local_size", 0>; -def ROCDL_BlockDimYOp : ROCDL_DeviceFunctionOp<"workgroup.dim.y", +def ROCDL_BlockDimYOp : ROCDL_DimGetterFunctionOp<"workgroup.dim.y", "__ockl_get_local_size", 1>; -def ROCDL_BlockDimZOp : ROCDL_DeviceFunctionOp<"workgroup.dim.z", +def ROCDL_BlockDimZOp : ROCDL_DimGetterFunctionOp<"workgroup.dim.z", "__ockl_get_local_size", 2>; -def ROCDL_GridDimXOp : ROCDL_DeviceFunctionOp<"grid.dim.x", +def ROCDL_GridDimXOp : ROCDL_DimGetterFunctionOp<"grid.dim.x", "__ockl_get_num_groups", 0>; -def ROCDL_GridDimYOp : ROCDL_DeviceFunctionOp<"grid.dim.y", +def ROCDL_GridDimYOp : ROCDL_DimGetterFunctionOp<"grid.dim.y", "__ockl_get_num_groups", 1>; -def ROCDL_GridDimZOp : ROCDL_DeviceFunctionOp<"grid.dim.z", +def ROCDL_GridDimZOp : ROCDL_DimGetterFunctionOp<"grid.dim.z", "__ockl_get_num_groups", 2>; //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp index 9fb557b..c2785f3 100644 --- a/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp +++ b/mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp @@ -108,8 +108,6 @@ struct RawBufferOpLowering : public ConvertOpToLLVMPattern<GpuOp> { if (wantedVecType.getElementType().isBF16()) llvmBufferValType = wantedVecType.clone(rewriter.getI16Type()); if (atomicCmpData) { - if (isa<VectorType>(wantedDataType)) - return gpuOp.emitOpError("vector compare-and-swap does not exist"); if (auto floatType = dyn_cast<FloatType>(wantedDataType)) llvmBufferValType = this->getTypeConverter()->convertType( rewriter.getIntegerType(floatType.getWidth())); diff --git a/mlir/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h b/mlir/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h index e4cd24e..eaf1554 100644 --- a/mlir/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h +++ b/mlir/lib/Conversion/GPUCommon/IndexIntrinsicsOpLowering.h @@ -114,9 +114,9 @@ public: if (upperBound && intrType != IntrType::None) { int32_t min = (intrType == IntrType::Dim ? 
1 : 0);
-    int32_t max = *upperBound - (intrType == IntrType::Id ? 0 : 1);
-    newOp->setAttr(
-        "range", DenseI32ArrayAttr::get(op.getContext(), ArrayRef{min, max}));
+    int32_t max = *upperBound + (intrType == IntrType::Id ? 0 : 1);
+    newOp->setAttr("range", LLVM::ConstantRangeAttr::get(
+                                rewriter.getContext(), 32, min, max));
   }
   if (indexBitwidth > 32) {
     newOp = rewriter.create<LLVM::SExtOp>(
diff --git a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
index 2992671..fc3e1fc 100644
--- a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
+++ b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
@@ -26,6 +26,7 @@
 #include "mlir/Conversion/LLVMCommon/LoweringOptions.h"
 #include "mlir/Conversion/LLVMCommon/Pattern.h"
 #include "mlir/Conversion/LLVMCommon/TypeConverter.h"
+#include "mlir/Conversion/MathToLLVM/MathToLLVM.h"
 #include "mlir/Conversion/MathToROCDL/MathToROCDL.h"
 #include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h"
 #include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
@@ -290,6 +291,7 @@ struct LowerGpuOpsToROCDLOpsPass
     populateAMDGPUToROCDLConversionPatterns(converter, llvmPatterns,
                                             *maybeChipset);
     populateVectorToLLVMConversionPatterns(converter, llvmPatterns);
+    populateMathToLLVMConversionPatterns(converter, llvmPatterns);
     cf::populateControlFlowToLLVMConversionPatterns(converter, llvmPatterns);
     populateFuncToLLVMConversionPatterns(converter, llvmPatterns);
     populateFinalizeMemRefToLLVMConversionPatterns(converter, llvmPatterns);
@@ -332,7 +334,11 @@ void mlir::configureGpuToROCDLConversionLegality(ConversionTarget &target) {
   target.addIllegalOp<LLVM::CosOp, LLVM::ExpOp, LLVM::Exp2Op, LLVM::FCeilOp,
                       LLVM::FFloorOp, LLVM::FRemOp, LLVM::LogOp, LLVM::Log10Op,
                       LLVM::Log2Op, LLVM::PowOp, LLVM::SinOp>();
-
+  // These ops are legal for f16 and f32 types.
+  target.addDynamicallyLegalOp<LLVM::ExpOp, LLVM::LogOp>([](Operation *op) {
+    return any_of(op->getOperandTypes(),
+                  llvm::IsaPred<Float16Type, Float32Type>);
+  });
   // TODO: Remove once we support replacing non-root ops.
   target.addLegalOp<gpu::YieldOp, gpu::GPUModuleOp>();
 }
diff --git a/mlir/lib/Dialect/AMDGPU/Transforms/CMakeLists.txt b/mlir/lib/Dialect/AMDGPU/Transforms/CMakeLists.txt
index 0889a21..5f93471 100644
--- a/mlir/lib/Dialect/AMDGPU/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/AMDGPU/Transforms/CMakeLists.txt
@@ -11,6 +11,7 @@ add_mlir_dialect_library(MLIRAMDGPUTransforms
   MLIRAMDGPUDialect
   MLIRAMDGPUUtils
   MLIRArithDialect
+  MLIRVectorDialect
   MLIRControlFlowDialect
   MLIRFuncDialect
   MLIRIR
diff --git a/mlir/lib/Dialect/AMDGPU/Transforms/EmulateAtomics.cpp b/mlir/lib/Dialect/AMDGPU/Transforms/EmulateAtomics.cpp
index 21042af..77f972e 100644
--- a/mlir/lib/Dialect/AMDGPU/Transforms/EmulateAtomics.cpp
+++ b/mlir/lib/Dialect/AMDGPU/Transforms/EmulateAtomics.cpp
@@ -13,7 +13,9 @@
 #include "mlir/Dialect/Arith/IR/Arith.h"
 #include "mlir/Dialect/ControlFlow/IR/ControlFlow.h"
 #include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h"
+#include "mlir/Dialect/Vector/IR/VectorOps.h"
 #include "mlir/IR/BuiltinAttributes.h"
+#include "mlir/IR/TypeUtilities.h"
 #include "mlir/Transforms/DialectConversion.h"

 namespace mlir::amdgpu {
@@ -86,6 +88,23 @@ static void patchOperandSegmentSizes(ArrayRef<NamedAttribute> attrs,
   }
 }

+// A helper function to flatten a vector value to a scalar containing its bits,
+// returning the value itself otherwise.
+static Value flattenVecToBits(ConversionPatternRewriter &rewriter, Location loc,
+                              Value val) {
+  auto vectorType = dyn_cast<VectorType>(val.getType());
+  if (!vectorType)
+    return val;
+
+  int64_t bitwidth =
+      vectorType.getElementTypeBitWidth() * vectorType.getNumElements();
+  Type allBitsType = rewriter.getIntegerType(bitwidth);
+  auto allBitsVecType = VectorType::get({1}, allBitsType);
+  Value bitcast = rewriter.create<vector::BitCastOp>(loc, allBitsVecType, val);
+  Value scalar = rewriter.create<vector::ExtractOp>(loc, bitcast, 0);
+  return scalar;
+}
+
 template <typename AtomicOp, typename ArithOp>
 LogicalResult RawBufferAtomicByCasPattern<AtomicOp, ArithOp>::matchAndRewrite(
     AtomicOp atomicOp, Adaptor adaptor,
@@ -113,6 +132,7 @@ LogicalResult RawBufferAtomicByCasPattern<AtomicOp, ArithOp>::matchAndRewrite(
   rewriter.setInsertionPointToEnd(loopBlock);
   Value prevLoad = loopBlock->getArgument(0);
   Value operated = rewriter.create<ArithOp>(loc, data, prevLoad);
+  dataType = operated.getType();

   SmallVector<NamedAttribute> cmpswapAttrs;
   patchOperandSegmentSizes(origAttrs, cmpswapAttrs, DataArgAction::Duplicate);
@@ -126,8 +146,8 @@ LogicalResult RawBufferAtomicByCasPattern<AtomicOp, ArithOp>::matchAndRewrite(
   // an int->float bitcast is introduced to account for the fact that cmpswap
   // only takes integer arguments.

-  Value prevLoadForCompare = prevLoad;
-  Value atomicResForCompare = atomicRes;
+  Value prevLoadForCompare = flattenVecToBits(rewriter, loc, prevLoad);
+  Value atomicResForCompare = flattenVecToBits(rewriter, loc, atomicRes);
   if (auto floatDataTy = dyn_cast<FloatType>(dataType)) {
     Type equivInt = rewriter.getIntegerType(floatDataTy.getWidth());
     prevLoadForCompare =
@@ -146,9 +166,17 @@ LogicalResult RawBufferAtomicByCasPattern<AtomicOp, ArithOp>::matchAndRewrite(
 void mlir::amdgpu::populateAmdgpuEmulateAtomicsPatterns(
     ConversionTarget &target, RewritePatternSet &patterns, Chipset chipset) {
   // gfx10 has no atomic adds.
-  if (chipset >= Chipset(10, 0, 0) || chipset < Chipset(9, 0, 8)) {
+  if (chipset.majorVersion == 10 || chipset < Chipset(9, 0, 8)) {
     target.addIllegalOp<RawBufferAtomicFaddOp>();
   }
+  // gfx11 has no fp16 atomics.
+  if (chipset.majorVersion == 11) {
+    target.addDynamicallyLegalOp<RawBufferAtomicFaddOp>(
+        [](RawBufferAtomicFaddOp op) -> bool {
+          Type elemType = getElementTypeOrSelf(op.getValue().getType());
+          return !isa<Float16Type, BFloat16Type>(elemType);
+        });
+  }
   // gfx9 has no, or only very limited, support for floating-point min and max.
   if (chipset.majorVersion == 9) {
     if (chipset >= Chipset(9, 0, 0xa) && chipset != Chipset(9, 4, 1)) {
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMAttrs.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMAttrs.cpp
index 491dcc7..6047c4a 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMAttrs.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMAttrs.cpp
@@ -233,6 +233,47 @@ DIRecursiveTypeAttrInterface DISubprogramAttr::getRecSelf(DistinctAttr recId) {
 }

 //===----------------------------------------------------------------------===//
+// ConstantRangeAttr
+//===----------------------------------------------------------------------===//
+
+Attribute ConstantRangeAttr::parse(AsmParser &parser, Type odsType) {
+  llvm::SMLoc loc = parser.getCurrentLocation();
+  IntegerType widthType;
+  if (parser.parseLess() || parser.parseType(widthType) ||
+      parser.parseComma()) {
+    return Attribute{};
+  }
+  unsigned bitWidth = widthType.getWidth();
+  APInt lower(bitWidth, 0);
+  APInt upper(bitWidth, 0);
+  if (parser.parseInteger(lower) || parser.parseComma() ||
+      parser.parseInteger(upper) || parser.parseGreater())
+    return Attribute{};
+  // A literal 0 is always parsed as 64 bits wide; fix up the width if needed.
+  if (lower.isZero())
+    lower = lower.sextOrTrunc(bitWidth);
+  if (upper.isZero())
+    upper = upper.sextOrTrunc(bitWidth);
+  return parser.getChecked<ConstantRangeAttr>(loc, parser.getContext(), lower,
+                                              upper);
+}
+
+void ConstantRangeAttr::print(AsmPrinter &printer) const {
+  printer << "<i" << getLower().getBitWidth() << ", " << getLower() << ", "
+          << getUpper() << ">";
+}
+
+LogicalResult
+ConstantRangeAttr::verify(llvm::function_ref<InFlightDiagnostic()> emitError,
+                          APInt lower, APInt upper) {
+  if (lower.getBitWidth() != upper.getBitWidth())
+    return emitError()
+           << "expected lower and upper to have matching bitwidths but got "
+           << lower.getBitWidth() << " vs. " << upper.getBitWidth();
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
 // TargetFeaturesAttr
 //===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.cpp
index c1ee650..ec21fbf 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.cpp
@@ -26,25 +26,13 @@ using namespace mlir;
 using namespace mlir::LLVM;
 using mlir::LLVM::detail::createIntrinsicCall;

-static llvm::Value *createIntrinsicCallWithRange(llvm::IRBuilderBase &builder,
-                                                 llvm::Intrinsic::ID intrinsic,
-                                                 DenseI32ArrayAttr maybeRange) {
-  auto *inst = llvm::cast<llvm::CallInst>(
-      createIntrinsicCall(builder, intrinsic, {}, {}));
-  if (maybeRange) {
-    llvm::ConstantRange Range(APInt(32, maybeRange[0]),
-                              APInt(32, maybeRange[1]));
-    inst->addRangeRetAttr(Range);
-  }
-  return inst;
-}
-
-// Create a call to ROCm-Device-Library function
-// Currently this routine will work only for calling ROCDL functions that
-// take a single int32 argument. It is likely that the interface of this
-// function will change to make it more generic.
-static llvm::Value *createDeviceFunctionCall(llvm::IRBuilderBase &builder,
-                                             StringRef fnName, int parameter) {
+// Create a call to a ROCm-Device-Library function that returns an ID.
+// This is intended specifically for device functions that fetch things like
+// block or grid dimensions, and so is limited to functions that take one
+// integer parameter.
+static llvm::Value *createDimGetterFunctionCall(llvm::IRBuilderBase &builder,
+                                                Operation *op, StringRef fnName,
+                                                int parameter) {
   llvm::Module *module = builder.GetInsertBlock()->getModule();
   llvm::FunctionType *functionType = llvm::FunctionType::get(
       llvm::Type::getInt64Ty(module->getContext()), // return type.
@@ -54,7 +42,14 @@ static llvm::Value *createDeviceFunctionCall(llvm::IRBuilderBase &builder,
       module->getOrInsertFunction(fnName, functionType).getCallee());
   llvm::Value *fnOp0 = llvm::ConstantInt::get(
       llvm::Type::getInt32Ty(module->getContext()), parameter);
-  return builder.CreateCall(fn, ArrayRef<llvm::Value *>(fnOp0));
+  auto *call = builder.CreateCall(fn, ArrayRef<llvm::Value *>(fnOp0));
+  if (auto rangeAttr = op->getAttrOfType<LLVM::ConstantRangeAttr>("range")) {
+    // Zero-extend to 64 bits because the GPU dialect uses 32-bit bounds but
+    // these ockl functions are defined to return 64-bit values.
+    call->addRangeRetAttr(llvm::ConstantRange(rangeAttr.getLower().zext(64),
+                                              rangeAttr.getUpper().zext(64)));
+  }
+  return call;
 }

 namespace {
diff --git a/mlir/lib/Transforms/Utils/DialectConversion.cpp b/mlir/lib/Transforms/Utils/DialectConversion.cpp
index f288c7f..b58a95c 100644
--- a/mlir/lib/Transforms/Utils/DialectConversion.cpp
+++ b/mlir/lib/Transforms/Utils/DialectConversion.cpp
@@ -624,10 +624,9 @@ private:
 class ReplaceOperationRewrite : public OperationRewrite {
 public:
   ReplaceOperationRewrite(ConversionPatternRewriterImpl &rewriterImpl,
-                          Operation *op, const TypeConverter *converter,
-                          bool changedResults)
+                          Operation *op, const TypeConverter *converter)
       : OperationRewrite(Kind::ReplaceOperation, rewriterImpl, op),
-        converter(converter), changedResults(changedResults) {}
+        converter(converter) {}

   static bool classof(const IRRewrite *rewrite) {
     return rewrite->getKind() == Kind::ReplaceOperation;
@@ -641,15 +640,10 @@ public:

   const TypeConverter *getConverter() const { return converter; }

-  bool hasChangedResults() const { return changedResults; }
-
 private:
   /// An optional type converter that can be used to materialize conversions
   /// between the new and old values if necessary.
   const TypeConverter *converter;
-
-  /// A boolean flag that indicates whether result types have changed or not.
-  bool changedResults;
 };

 class CreateOperationRewrite : public OperationRewrite {
@@ -941,6 +935,9 @@ struct ConversionPatternRewriterImpl : public RewriterBase::Listener {
   /// to modify/access them is invalid rewriter API usage.
   SetVector<Operation *> replacedOps;

+  /// A set of all unresolved materializations.
+  DenseSet<Operation *> unresolvedMaterializations;
+
   /// The current type converter, or nullptr if no type converter is currently
   /// active.
   const TypeConverter *currentTypeConverter = nullptr;
@@ -1066,6 +1063,7 @@ void UnresolvedMaterializationRewrite::rollback() {
     for (Value input : op->getOperands())
       rewriterImpl.mapping.erase(input);
   }
+  rewriterImpl.unresolvedMaterializations.erase(op);
   op->erase();
 }
@@ -1347,6 +1345,7 @@ Value ConversionPatternRewriterImpl::buildUnresolvedMaterialization(
   builder.setInsertionPoint(ip.getBlock(), ip.getPoint());
   auto convertOp =
       builder.create<UnrealizedConversionCastOp>(loc, outputType, inputs);
+  unresolvedMaterializations.insert(convertOp);
   appendRewrite<UnresolvedMaterializationRewrite>(convertOp, converter, kind);
   return convertOp.getResult(0);
 }
@@ -1379,22 +1378,28 @@ void ConversionPatternRewriterImpl::notifyOpReplaced(Operation *op,
   assert(newValues.size() == op->getNumResults());
   assert(!ignoredOps.contains(op) && "operation was already replaced");

-  // Track if any of the results changed, e.g. erased and replaced with null.
-  bool resultChanged = false;
-
   // Create mappings for each of the new result values.
   for (auto [newValue, result] : llvm::zip(newValues, op->getResults())) {
     if (!newValue) {
-      resultChanged = true;
-      continue;
+      // This result was dropped and no replacement value was provided.
+      if (unresolvedMaterializations.contains(op)) {
+        // Do not create another materialization if we are erasing a
+        // materialization.
+        continue;
+      }
+
+      // Materialize a replacement value "out of thin air".
+      newValue = buildUnresolvedMaterialization(
+          MaterializationKind::Source, computeInsertPoint(result),
+          result.getLoc(), /*inputs=*/ValueRange(),
+          /*outputType=*/result.getType(), currentTypeConverter);
     }
+
     // Remap, and check for any result type changes.
     mapping.map(result, newValue);
-    resultChanged |= (newValue.getType() != result.getType());
   }
-  appendRewrite<ReplaceOperationRewrite>(op, currentTypeConverter,
-                                         resultChanged);
+  appendRewrite<ReplaceOperationRewrite>(op, currentTypeConverter);

   // Mark this operation and all nested ops as replaced.
   op->walk([&](Operation *op) { replacedOps.insert(op); });
@@ -2359,11 +2364,6 @@ private:
       ConversionPatternRewriterImpl &rewriterImpl,
       DenseMap<Value, SmallVector<Value>> &inverseMapping);

-  /// Legalize an operation result that was marked as "erased".
-  LogicalResult
-  legalizeErasedResult(Operation *op, OpResult result,
-                       ConversionPatternRewriterImpl &rewriterImpl);
-
   /// Dialect conversion configuration.
   ConversionConfig config;
@@ -2455,77 +2455,6 @@ legalizeUnresolvedMaterialization(RewriterBase &rewriter,
   return failure();
 }

-/// Erase all dead unrealized_conversion_cast ops. An op is dead if its results
-/// are not used (transitively) by any op that is not in the given list of
-/// cast ops.
-///
-/// In particular, this function erases cyclic casts that may be inserted
-/// during the dialect conversion process. E.g.:
-/// %0 = unrealized_conversion_cast(%1)
-/// %1 = unrealized_conversion_cast(%0)
-// Note: This step will become unnecessary when
-// https://github.com/llvm/llvm-project/pull/106760 has been merged.
-static void eraseDeadUnrealizedCasts(
-    ArrayRef<UnrealizedConversionCastOp> castOps,
-    SmallVectorImpl<UnrealizedConversionCastOp> *remainingCastOps) {
-  // Ops that have already been visited or are currently being visited.
-  DenseSet<Operation *> visited;
-  // Set of all cast ops for faster lookups.
-  DenseSet<Operation *> castOpSet;
-  // Set of all cast ops that have been determined to be alive.
- DenseSet<Operation *> live; - - for (UnrealizedConversionCastOp op : castOps) - castOpSet.insert(op); - - // Visit a cast operation. Return "true" if the operation is live. - std::function<bool(Operation *)> visit = [&](Operation *op) -> bool { - // No need to traverse any IR if the op was already marked as live. - if (live.contains(op)) - return true; - - // Do not visit ops multiple times. If we find a circle, no live user was - // found on the current path. - if (!visited.insert(op).second) - return false; - - // Visit all users. - for (Operation *user : op->getUsers()) { - // If the user is not an unrealized_conversion_cast op, then the given op - // is live. - if (!castOpSet.contains(user)) { - live.insert(op); - return true; - } - // Otherwise, it is live if a live op can be reached from one of its - // users (which must all be unrealized_conversion_cast ops). - if (visit(user)) { - live.insert(op); - return true; - } - } - - return false; - }; - - // Visit all cast ops. - for (UnrealizedConversionCastOp op : castOps) { - visit(op); - visited.clear(); - } - - // Erase all cast ops that are dead. - for (UnrealizedConversionCastOp op : castOps) { - if (live.contains(op)) { - if (remainingCastOps) - remainingCastOps->push_back(op); - continue; - } - op->dropAllUses(); - op->erase(); - } -} - LogicalResult OperationConverter::convertOperations(ArrayRef<Operation *> ops) { if (ops.empty()) return success(); @@ -2584,14 +2513,13 @@ LogicalResult OperationConverter::convertOperations(ArrayRef<Operation *> ops) { // Reconcile all UnrealizedConversionCastOps that were inserted by the // dialect conversion frameworks. (Not the one that were inserted by // patterns.) - SmallVector<UnrealizedConversionCastOp> remainingCastOps1, remainingCastOps2; - eraseDeadUnrealizedCasts(allCastOps, &remainingCastOps1); - reconcileUnrealizedCasts(remainingCastOps1, &remainingCastOps2); + SmallVector<UnrealizedConversionCastOp> remainingCastOps; + reconcileUnrealizedCasts(allCastOps, &remainingCastOps); // Try to legalize all unresolved materializations. if (config.buildMaterializations) { IRRewriter rewriter(rewriterImpl.context, config.listener); - for (UnrealizedConversionCastOp castOp : remainingCastOps2) { + for (UnrealizedConversionCastOp castOp : remainingCastOps) { auto it = rewriteMap.find(castOp.getOperation()); assert(it != rewriteMap.end() && "inconsistent state"); if (failed(legalizeUnresolvedMaterialization(rewriter, it->second))) @@ -2646,30 +2574,22 @@ LogicalResult OperationConverter::legalizeConvertedOpResultTypes( for (unsigned i = 0; i < rewriterImpl.rewrites.size(); ++i) { auto *opReplacement = dyn_cast<ReplaceOperationRewrite>(rewriterImpl.rewrites[i].get()); - if (!opReplacement || !opReplacement->hasChangedResults()) + if (!opReplacement) continue; Operation *op = opReplacement->getOperation(); for (OpResult result : op->getResults()) { - Value newValue = rewriterImpl.mapping.lookupOrNull(result); - - // If the operation result was replaced with null, all of the uses of this - // value should be replaced. - if (!newValue) { - if (failed(legalizeErasedResult(op, result, rewriterImpl))) - return failure(); + // If the type of this op result changed and the result is still live, + // we need to materialize a conversion. + if (rewriterImpl.mapping.lookupOrNull(result, result.getType())) continue; - } - - // Otherwise, check to see if the type of the result changed. 
- if (result.getType() == newValue.getType()) - continue; - Operation *liveUser = findLiveUserOfReplaced(result, rewriterImpl, inverseMapping); if (!liveUser) continue; // Legalize this result. + Value newValue = rewriterImpl.mapping.lookupOrNull(result); + assert(newValue && "replacement value not found"); Value castValue = rewriterImpl.buildUnresolvedMaterialization( MaterializationKind::Source, computeInsertPoint(result), op->getLoc(), /*inputs=*/newValue, /*outputType=*/result.getType(), @@ -2727,25 +2647,6 @@ LogicalResult OperationConverter::legalizeConvertedArgumentTypes( return success(); } -LogicalResult OperationConverter::legalizeErasedResult( - Operation *op, OpResult result, - ConversionPatternRewriterImpl &rewriterImpl) { - // If the operation result was replaced with null, all of the uses of this - // value should be replaced. - auto liveUserIt = llvm::find_if_not(result.getUsers(), [&](Operation *user) { - return rewriterImpl.isOpIgnored(user); - }); - if (liveUserIt != result.user_end()) { - InFlightDiagnostic diag = op->emitError("failed to legalize operation '") - << op->getName() << "' marked as erased"; - diag.attachNote(liveUserIt->getLoc()) - << "found live user of result #" << result.getResultNumber() << ": " - << *liveUserIt; - return failure(); - } - return success(); -} - //===----------------------------------------------------------------------===// // Reconcile Unrealized Casts //===----------------------------------------------------------------------===// diff --git a/mlir/test/Conversion/AMDGPUToROCDL/amdgpu-to-rocdl.mlir b/mlir/test/Conversion/AMDGPUToROCDL/amdgpu-to-rocdl.mlir index cc51a8c..9f4db15 100644 --- a/mlir/test/Conversion/AMDGPUToROCDL/amdgpu-to-rocdl.mlir +++ b/mlir/test/Conversion/AMDGPUToROCDL/amdgpu-to-rocdl.mlir @@ -224,6 +224,18 @@ func.func @amdgpu_raw_buffer_atomic_cmpswap_i64(%src : i64, %cmp : i64, %buf : m func.return %dst : i64 } +// CHECK-LABEL: func @amdgpu_raw_buffer_atomic_cmpswap_v2f16 +// CHECK-SAME: (%[[src:.*]]: vector<2xf16>, %[[cmp:.*]]: vector<2xf16>, {{.*}}) +func.func @amdgpu_raw_buffer_atomic_cmpswap_v2f16(%src : vector<2xf16>, %cmp : vector<2xf16>, %buf : memref<64xf16>, %idx: i32) -> vector<2xf16> { + // CHECK-DAG: %[[srcBits:.+]] = llvm.bitcast %[[src]] : vector<2xf16> to i32 + // CHECK-DAG: %[[cmpBits:.+]] = llvm.bitcast %[[cmp]] : vector<2xf16> to i32 + // CHECK: %[[dstBits:.+]] = rocdl.raw.ptr.buffer.atomic.cmpswap %[[srcBits]], %[[cmpBits]], %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : i32 + // CHECK: %[[dst:.+]] = llvm.bitcast %[[dstBits]] : i32 to vector<2xf16> + // CHECK: return %[[dst]] + %dst = amdgpu.raw_buffer_atomic_cmpswap {boundsCheck = true} %src, %cmp -> %buf[%idx] : vector<2xf16> -> memref<64xf16>, i32 + func.return %dst : vector<2xf16> +} + // CHECK-LABEL: func @lds_barrier func.func @lds_barrier() { // GFX908: llvm.inline_asm has_side_effects asm_dialect = att diff --git a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir index b6fb085..2396ddf 100644 --- a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir +++ b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir @@ -77,18 +77,18 @@ gpu.module @test_module { {known_block_size = array<i32: 8, 12, 16>, known_grid_size = array<i32: 20, 24, 28>} { - // CHECK: rocdl.workitem.id.x {range = array<i32: 0, 8>} : i32 + // CHECK: rocdl.workitem.id.x range <i32, 0, 8> : i32 %tIdX = gpu.thread_id x - // CHECK: rocdl.workitem.id.y {range = array<i32: 0, 12>} : i32 + // CHECK: rocdl.workitem.id.y range <i32, 0, 12> : i32 
%tIdY = gpu.thread_id y - // CHECK: rocdl.workitem.id.z {range = array<i32: 0, 16>} : i32 + // CHECK: rocdl.workitem.id.z range <i32, 0, 16> : i32 %tIdZ = gpu.thread_id z - // CHECK: rocdl.workgroup.id.x {range = array<i32: 0, 20>} : i32 + // CHECK: rocdl.workgroup.id.x range <i32, 0, 20> : i32 %bIdX = gpu.block_id x - // CHECK: rocdl.workgroup.id.y {range = array<i32: 0, 24>} : i32 + // CHECK: rocdl.workgroup.id.y range <i32, 0, 24> : i32 %bIdY = gpu.block_id y - // CHECK: rocdl.workgroup.id.z {range = array<i32: 0, 28>} : i32 + // CHECK: rocdl.workgroup.id.z range <i32, 0, 28> : i32 %bIdZ = gpu.block_id z // "Usage" to make the ID calls not die @@ -132,6 +132,68 @@ gpu.module @test_module { // ----- gpu.module @test_module { + // CHECK-LABEL: func @gpu_sqrt + func.func @gpu_sqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) { + %result16 = math.sqrt %arg_f16 : f16 + // CHECK: llvm.intr.sqrt(%{{.*}}) : (f16) -> f16 + %result32 = math.sqrt %arg_f32 : f32 + // CHECK: llvm.intr.sqrt(%{{.*}}) : (f32) -> f32 + %result64 = math.sqrt %arg_f64 : f64 + // CHECK: llvm.intr.sqrt(%{{.*}}) : (f64) -> f64 + func.return %result16, %result32, %result64 : f16, f32, f64 + } +} + +// ----- + +gpu.module @test_module { + // CHECK-LABEL: func @gpu_fabs + func.func @gpu_fabs(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) { + %result16 = math.absf %arg_f16 : f16 + // CHECK: llvm.intr.fabs(%{{.*}}) : (f16) -> f16 + %result32 = math.absf %arg_f32 : f32 + // CHECK: llvm.intr.fabs(%{{.*}}) : (f32) -> f32 + %result64 = math.absf %arg_f64 : f64 + // CHECK: llvm.intr.fabs(%{{.*}}) : (f64) -> f64 + func.return %result16, %result32, %result64 : f16, f32, f64 + } +} + +// ----- + +gpu.module @test_module { + // CHECK: llvm.func @__ocml_exp_f64(f64) -> f64 + // CHECK-LABEL: func @gpu_exp + func.func @gpu_exp(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) { + %result16 = math.exp %arg_f16 : f16 + // CHECK: llvm.intr.exp(%{{.*}}) : (f16) -> f16 + %result32 = math.exp %arg_f32 : f32 + // CHECK: llvm.intr.exp(%{{.*}}) : (f32) -> f32 + %result64 = math.exp %arg_f64 : f64 + // CHECK: llvm.call @__ocml_exp_f64(%{{.*}}) : (f64) -> f64 + func.return %result16, %result32, %result64 : f16, f32, f64 + } +} + +// ----- + +gpu.module @test_module { + // CHECK: llvm.func @__ocml_log_f64(f64) -> f64 + // CHECK-LABEL: func @gpu_log + func.func @gpu_log(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) { + %result16 = math.log %arg_f16 : f16 + // CHECK: llvm.intr.log(%{{.*}}) : (f16) -> f16 + %result32 = math.log %arg_f32 : f32 + // CHECK: llvm.intr.log(%{{.*}}) : (f32) -> f32 + %result64 = math.log %arg_f64 : f64 + // CHECK: llvm.call @__ocml_log_f64(%{{.*}}) : (f64) -> f64 + func.return %result16, %result32, %result64 : f16, f32, f64 + } +} + +// ----- + +gpu.module @test_module { // CHECK: llvm.func @__ocml_cbrt_f32(f32) -> f32 // CHECK: llvm.func @__ocml_cbrt_f64(f64) -> f64 // CHECK-LABEL: func @gpu_cbrt diff --git a/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir b/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir index d8570bd..25ec5d0 100644 --- a/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir +++ b/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir @@ -558,8 +558,8 @@ func.func @deinterleave(%a: vector<4xf32>) -> (vector<2xf32>, vector<2xf32>) { // CHECK-LABEL: func @deinterleave_scalar // CHECK-SAME: (%[[ARG0:.+]]: vector<2xf32>) -// CHECK: %[[EXTRACT0:.*]] = spirv.CompositeExtract %[[ARG0]][0 : i32] : vector<2xf32> -// 
CHECK: %[[EXTRACT1:.*]] = spirv.CompositeExtract %[[ARG0]][1 : i32] : vector<2xf32> +// CHECK-DAG: %[[EXTRACT0:.*]] = spirv.CompositeExtract %[[ARG0]][0 : i32] : vector<2xf32> +// CHECK-DAG: %[[EXTRACT1:.*]] = spirv.CompositeExtract %[[ARG0]][1 : i32] : vector<2xf32> // CHECK-DAG: %[[CAST0:.*]] = builtin.unrealized_conversion_cast %[[EXTRACT0]] : f32 to vector<1xf32> // CHECK-DAG: %[[CAST1:.*]] = builtin.unrealized_conversion_cast %[[EXTRACT1]] : f32 to vector<1xf32> // CHECK: return %[[CAST0]], %[[CAST1]] diff --git a/mlir/test/Dialect/AMDGPU/amdgpu-emulate-atomics.mlir b/mlir/test/Dialect/AMDGPU/amdgpu-emulate-atomics.mlir index b1c7c1b..104af58 100644 --- a/mlir/test/Dialect/AMDGPU/amdgpu-emulate-atomics.mlir +++ b/mlir/test/Dialect/AMDGPU/amdgpu-emulate-atomics.mlir @@ -1,5 +1,6 @@ // RUN: mlir-opt -split-input-file -amdgpu-emulate-atomics=chipset=gfx90a %s | FileCheck %s --check-prefixes=CHECK,GFX9 // RUN: mlir-opt -split-input-file -amdgpu-emulate-atomics=chipset=gfx1030 %s | FileCheck %s --check-prefixes=CHECK,GFX10 +// RUN: mlir-opt -split-input-file -amdgpu-emulate-atomics=chipset=gfx1100 %s | FileCheck %s --check-prefixes=CHECK,GFX11 // ----- @@ -8,6 +9,7 @@ func.func @atomic_fmax(%val: f32, %buffer: memref<?xf32>, %idx: i32) { // CHECK-SAME: ([[val:%.+]]: f32, [[buffer:%.+]]: memref<?xf32>, [[idx:%.+]]: i32) // CHECK: gpu.printf "Begin\0A" // GFX10: amdgpu.raw_buffer_atomic_fmax {foo, indexOffset = 4 : i32} [[val]] -> [[buffer]][[[idx]]] +// GFX11: amdgpu.raw_buffer_atomic_fmax {foo, indexOffset = 4 : i32} [[val]] -> [[buffer]][[[idx]]] // GFX9: [[ld:%.+]] = amdgpu.raw_buffer_load {foo, indexOffset = 4 : i32} [[buffer]][[[idx]]] // GFX9: cf.br [[loop:\^.+]]([[ld]] : f32) // GFX9: [[loop]]([[arg:%.+]]: f32): @@ -33,6 +35,7 @@ func.func @atomic_fmax_f64(%val: f64, %buffer: memref<?xf64>, %idx: i32) { // CHECK: gpu.printf "Begin\0A" // GFX9: amdgpu.raw_buffer_atomic_fmax [[val]] -> [[buffer]][[[idx]]] // GFX10: amdgpu.raw_buffer_atomic_fmax [[val]] -> [[buffer]][[[idx]]] +// GFX11: amdgpu.raw_buffer_atomic_fmax [[val]] -> [[buffer]][[[idx]]] // CHECK-NEXT: gpu.printf "End\0A" gpu.printf "Begin\n" amdgpu.raw_buffer_atomic_fmax %val -> %buffer[%idx] : f64 -> memref<?xf64>, i32 @@ -47,6 +50,25 @@ func.func @atomic_fadd(%val: f32, %buffer: memref<?xf32>, %idx: i32) { // GFX9: amdgpu.raw_buffer_atomic_fadd // GFX10: amdgpu.raw_buffer_load // GFX10: amdgpu.raw_buffer_atomic_cmpswap +// GFX11: amdgpu.raw_buffer_atomic_fadd amdgpu.raw_buffer_atomic_fadd %val -> %buffer[%idx] : f32 -> memref<?xf32>, i32 func.return } + +// CHECK: func @atomic_fadd_v2f16 +func.func @atomic_fadd_v2f16(%val: vector<2xf16>, %buffer: memref<?xf16>, %idx: i32) { +// GFX9: amdgpu.raw_buffer_atomic_fadd +// GFX10: amdgpu.raw_buffer_load +// GFX10: amdgpu.raw_buffer_atomic_cmpswap +// Note: the atomic operation itself will be done over i32, and then we use bitcasts +// to scalars in order to test for exact bitwise equality instead of float +// equality. 
+// GFX11: %[[old:.+]] = amdgpu.raw_buffer_atomic_cmpswap +// GFX11: %[[vecCastExpected:.+]] = vector.bitcast %{{.*}} : vector<2xf16> to vector<1xi32> +// GFX11: %[[scalarExpected:.+]] = vector.extract %[[vecCastExpected]][0] +// GFX11: %[[vecCastOld:.+]] = vector.bitcast %[[old]] : vector<2xf16> to vector<1xi32> +// GFX11: %[[scalarOld:.+]] = vector.extract %[[vecCastOld]][0] +// GFX11: arith.cmpi eq, %[[scalarOld]], %[[scalarExpected]] + amdgpu.raw_buffer_atomic_fadd %val -> %buffer[%idx] : vector<2xf16> -> memref<?xf16>, i32 + func.return +} diff --git a/mlir/test/Target/LLVMIR/rocdl.mlir b/mlir/test/Target/LLVMIR/rocdl.mlir index 97b5057..0f0c2412 100644 --- a/mlir/test/Target/LLVMIR/rocdl.mlir +++ b/mlir/test/Target/LLVMIR/rocdl.mlir @@ -28,8 +28,10 @@ llvm.func @rocdl_special_regs() -> i32 { %12 = rocdl.grid.dim.z : i64 // CHECK: call range(i32 0, 64) i32 @llvm.amdgcn.workitem.id.x() - %13 = rocdl.workitem.id.x {range = array<i32: 0, 64>} : i32 + %13 = rocdl.workitem.id.x range <i32, 0, 64> : i32 + // CHECK: call range(i64 1, 65) i64 @__ockl_get_local_size(i32 0) + %14 = rocdl.workgroup.dim.x range <i32, 1, 65> : i64 llvm.return %1 : i32 } diff --git a/mlir/test/Transforms/test-legalize-erased-op-with-uses.mlir b/mlir/test/Transforms/test-legalize-erased-op-with-uses.mlir index 49275e8..6e8f016 100644 --- a/mlir/test/Transforms/test-legalize-erased-op-with-uses.mlir +++ b/mlir/test/Transforms/test-legalize-erased-op-with-uses.mlir @@ -3,8 +3,8 @@ // Test that an error is emitted when an operation is marked as "erased", but // has users that live across the conversion. func.func @remove_all_ops(%arg0: i32) -> i32 { - // expected-error@below {{failed to legalize operation 'test.illegal_op_a' marked as erased}} + // expected-error@below {{failed to legalize unresolved materialization from () to 'i32' that remained live after conversion}} %0 = "test.illegal_op_a"() : () -> i32 - // expected-note@below {{found live user of result #0: func.return %0 : i32}} + // expected-note@below {{see existing live user here}} return %0 : i32 } diff --git a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel index 4be8e17..4646aef 100644 --- a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel @@ -387,6 +387,7 @@ libc_support_library( hdrs = ["src/__support/CPP/new.h"], deps = [ ":__support_common", + ":__support_macros_properties_os", ], ) diff --git a/utils/bazel/llvm-project-overlay/libc/test/src/__support/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/test/src/__support/BUILD.bazel index c8001fe..fae0482 100644 --- a/utils/bazel/llvm-project-overlay/libc/test/src/__support/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/libc/test/src/__support/BUILD.bazel @@ -79,6 +79,7 @@ libc_test( srcs = ["arg_list_test.cpp"], deps = [ "//libc:__support_arg_list", + "//libc:__support_macros_properties_os", ], ) diff --git a/utils/bazel/llvm-project-overlay/libc/test/src/__support/FPUtil/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/test/src/__support/FPUtil/BUILD.bazel index 41b85d2..d3ae91a 100644 --- a/utils/bazel/llvm-project-overlay/libc/test/src/__support/FPUtil/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/libc/test/src/__support/FPUtil/BUILD.bazel @@ -17,6 +17,7 @@ libc_test( "//libc:__support_fputil_fp_bits", "//libc:__support_fputil_fpbits_str", "//libc:__support_integer_literals", + "//libc:__support_macros_properties_types", "//libc:__support_sign", ], ) @@ -41,7 +42,7 @@ 
libc_test( deps = [ "//libc:__support_fputil_rounding_mode", "//libc:__support_uint128", + "//libc:hdr_fenv_macros", "//libc/utils/MPFRWrapper:mpfr_wrapper", - "//libc:hdr_fenv_macros", ], ) diff --git a/utils/bazel/llvm-project-overlay/libc/test/src/fenv/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/test/src/fenv/BUILD.bazel index 03c94d1..c6ae534 100644 --- a/utils/bazel/llvm-project-overlay/libc/test/src/fenv/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/libc/test/src/fenv/BUILD.bazel @@ -171,6 +171,7 @@ libc_test( ], deps = [ "//libc:__support_fputil_fenv_impl", + "//libc:__support_macros_properties_os", "//libc:hdr_fenv_macros", "//libc:types_fenv_t", "//libc/test/UnitTest:fp_test_helpers", diff --git a/utils/bazel/llvm-project-overlay/libc/test/src/math/libc_math_test_rules.bzl b/utils/bazel/llvm-project-overlay/libc/test/src/math/libc_math_test_rules.bzl index 2601da2e..d788705 100644 --- a/utils/bazel/llvm-project-overlay/libc/test/src/math/libc_math_test_rules.bzl +++ b/utils/bazel/llvm-project-overlay/libc/test/src/math/libc_math_test_rules.bzl @@ -35,6 +35,7 @@ def math_test(name, hdrs = [], deps = [], **kwargs): "//libc:__support_fputil_nearest_integer_operations", "//libc:__support_fputil_normal_float", "//libc:__support_macros_properties_architectures", + "//libc:__support_macros_properties_os", "//libc:__support_math_extras", "//libc:__support_uint128", "//libc:hdr_errno_macros", diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel index c931898..4124897 100644 --- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel +++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel @@ -6004,6 +6004,7 @@ cc_library( ":LLVMCommonConversion", ":LLVMDialect", ":MathDialect", + ":MathToLLVM", ":MathToROCDL", ":MemRefDialect", ":MemRefToLLVM", |
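To close the loop on the range-attribute plumbing above: here is a hedged sketch of how a lowering could attach the new `LLVM_ConstantRangeAttr` to a ROCDL ID op, in the spirit of the IndexIntrinsicsOpLowering.h change. The helper name and the notion of a statically known block dimension are invented for illustration; the `ConstantRangeAttr::get` builder and the generated `setRangeAttr` setter are the ones introduced in this patch.

```cpp
#include "mlir/Dialect/LLVMIR/LLVMAttrs.h"
#include "mlir/Dialect/LLVMIR/ROCDLDialect.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;

// Sketch only: emit rocdl.workitem.id.x and record that its result lies in
// the half-open interval [0, knownBlockDimX).
static Value createThreadIdXWithRange(ConversionPatternRewriter &rewriter,
                                      Location loc, int32_t knownBlockDimX) {
  auto idOp = rewriter.create<ROCDL::ThreadIdXOp>(loc, rewriter.getI32Type());
  // The AttrBuilder added above takes (bitWidth, lower, upper).
  idOp.setRangeAttr(LLVM::ConstantRangeAttr::get(
      rewriter.getContext(), /*bitWidth=*/32, /*lower=*/0,
      /*upper=*/knownBlockDimX));
  return idOp.getRes();
}
```

With the attribute in place, the op prints as `rocdl.workitem.id.x range <i32, 0, N> : i32` and, per the rocdl.mlir test above, translates to a call carrying LLVM's `range(i32 0, N)` return attribute, which downstream optimizations can use to reason about the ID's bounds.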