39 files changed, 929 insertions, 436 deletions
diff --git a/.github/workflows/libcxx-build-and-test.yaml b/.github/workflows/libcxx-build-and-test.yaml index 77f79a8..b78f2c6 100644 --- a/.github/workflows/libcxx-build-and-test.yaml +++ b/.github/workflows/libcxx-build-and-test.yaml @@ -255,7 +255,6 @@ jobs: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Install dependencies run: | - choco install -y ninja pip install psutil - name: Install a current LLVM if: ${{ matrix.mingw != true }} diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp index 11d2d5c..999e302c 100644 --- a/clang/lib/Sema/SemaConcept.cpp +++ b/clang/lib/Sema/SemaConcept.cpp @@ -1049,6 +1049,7 @@ ExprResult ConstraintSatisfactionChecker::Evaluate( case NormalizedConstraint::ConstraintKind::Compound: return Evaluate(static_cast<const CompoundConstraint &>(Constraint), MLTAL); } + llvm_unreachable("Unknown ConstraintKind enum"); } static bool CheckConstraintSatisfaction( @@ -2141,6 +2142,7 @@ bool SubstituteParameterMappings::substitute(NormalizedConstraint &N) { return substitute(Compound.getRHS()); } } + llvm_unreachable("Unknown ConstraintKind enum"); } } // namespace @@ -2561,7 +2563,6 @@ FormulaType SubsumptionChecker::Normalize(const NormalizedConstraint &NC) { }; switch (NC.getKind()) { - case NormalizedConstraint::ConstraintKind::Atomic: return {{find(&static_cast<const AtomicConstraint &>(NC))}}; @@ -2601,6 +2602,7 @@ FormulaType SubsumptionChecker::Normalize(const NormalizedConstraint &NC) { return Res; } } + llvm_unreachable("Unknown ConstraintKind enum"); } void SubsumptionChecker::AddUniqueClauseToFormula(Formula &F, Clause C) { diff --git a/clang/test/Headers/arm-acle-header.c b/clang/test/Headers/arm-acle-header.c index fea8472..58fcc66 100644 --- a/clang/test/Headers/arm-acle-header.c +++ b/clang/test/Headers/arm-acle-header.c @@ -10,6 +10,8 @@ // RUN: %clang_cc1 -x c++ -triple arm64ec-windows -target-cpu cortex-a53 -fsyntax-only -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=19.11 %s // expected-no-diagnostics +#include "system_reserved_names.h" + #include <arm_acle.h> #ifdef _MSC_VER #include <intrin.h> diff --git a/clang/test/Headers/arm-cde-header.c b/clang/test/Headers/arm-cde-header.c index 1f60368..526202a 100644 --- a/clang/test/Headers/arm-cde-header.c +++ b/clang/test/Headers/arm-cde-header.c @@ -9,5 +9,7 @@ // Check that the headers don't conflict with each other +#include "system_reserved_names.h" + #include <arm_cde.h> #include <arm_mve.h> diff --git a/clang/test/Headers/arm-cmse-header.c b/clang/test/Headers/arm-cmse-header.c index 862572d..c21c1ff 100644 --- a/clang/test/Headers/arm-cmse-header.c +++ b/clang/test/Headers/arm-cmse-header.c @@ -2,6 +2,8 @@ // RUN: %clang_cc1 -triple thumbv8m.base-eabi -fsyntax-only -ffreestanding -x c++ %s -verify -mcmse // expected-no-diagnostics +#include "system_reserved_names.h" + #include <arm_cmse.h> typedef void (*callback_t)(void); diff --git a/clang/test/Headers/arm-fp16-header.c b/clang/test/Headers/arm-fp16-header.c index b1a87fa..e472654 100644 --- a/clang/test/Headers/arm-fp16-header.c +++ b/clang/test/Headers/arm-fp16-header.c @@ -18,4 +18,6 @@ // REQUIRES: aarch64-registered-target || arm-registered-target +#include "system_reserved_names.h" + #include <arm_fp16.h> diff --git a/clang/test/Headers/arm-neon-header.c b/clang/test/Headers/arm-neon-header.c index 89bd5aa..43b1b35 100644 --- a/clang/test/Headers/arm-neon-header.c +++ b/clang/test/Headers/arm-neon-header.c @@ -26,4 +26,6 @@ // REQUIRES: 
aarch64-registered-target || arm-registered-target +#include "system_reserved_names.h" + #include <arm_neon.h> diff --git a/clang/test/Headers/system_reserved_names.h b/clang/test/Headers/system_reserved_names.h new file mode 100644 index 0000000..1a53f4f --- /dev/null +++ b/clang/test/Headers/system_reserved_names.h @@ -0,0 +1,165 @@ +// Test that headers are not tripped up by the surrounding code defining various +// alphabetic macros. Also ensure that we don't swallow the definition of user +// provided macros (in other words, ensure that we push/pop correctly everywhere). +// +// The contents of this header is a lightly trimmed version of +// libcxx/test/libcxx/system_reserved_names.gen.py; additions to that testcase +// can be synced into this header as well. + +#define SYSTEM_RESERVED_NAME This name should not be used in Clang headers + +// libc++ does not use single-letter names as a matter of principle. +// But Windows' own <wchar.h>, <math.h>, and <exception> use many of these +// (at least C,E,F,I,M,N,P,S,X,Y,Z) as uglified function parameter names, +// so don't define these on Windows. +// +#ifndef _WIN32 +#define _A SYSTEM_RESERVED_NAME +#define _B SYSTEM_RESERVED_NAME +#define _C SYSTEM_RESERVED_NAME +#define _D SYSTEM_RESERVED_NAME +#define _E SYSTEM_RESERVED_NAME +#define _F SYSTEM_RESERVED_NAME +#define _G SYSTEM_RESERVED_NAME +#define _H SYSTEM_RESERVED_NAME +#define _I SYSTEM_RESERVED_NAME +#define _J SYSTEM_RESERVED_NAME +#define _K SYSTEM_RESERVED_NAME +#define _L SYSTEM_RESERVED_NAME +#define _M SYSTEM_RESERVED_NAME +#define _N SYSTEM_RESERVED_NAME +#define _O SYSTEM_RESERVED_NAME +#define _P SYSTEM_RESERVED_NAME +#define _Q SYSTEM_RESERVED_NAME +#define _R SYSTEM_RESERVED_NAME +#define _S SYSTEM_RESERVED_NAME +#define _T SYSTEM_RESERVED_NAME +#define _U SYSTEM_RESERVED_NAME +#define _V SYSTEM_RESERVED_NAME +#define _W SYSTEM_RESERVED_NAME +#define _X SYSTEM_RESERVED_NAME +#define _Y SYSTEM_RESERVED_NAME +#define _Z SYSTEM_RESERVED_NAME +#endif + +// FreeBSD's <sys/types.h> uses _M +// +#ifdef __FreeBSD__ +# undef _M +#endif + +// Test that libc++ doesn't use names that collide with FreeBSD system macros. +// newlib and picolibc also define these macros +#if !defined(__FreeBSD__) && !defined(_NEWLIB_VERSION) +# define __null_sentinel SYSTEM_RESERVED_NAME +# define __generic SYSTEM_RESERVED_NAME +#endif + +// tchar.h defines these macros on Windows +#ifndef _WIN32 +# define _UI SYSTEM_RESERVED_NAME +# define _PUC SYSTEM_RESERVED_NAME +# define _CPUC SYSTEM_RESERVED_NAME +# define _PC SYSTEM_RESERVED_NAME +# define _CRPC SYSTEM_RESERVED_NAME +# define _CPC SYSTEM_RESERVED_NAME +#endif + +// yvals.h on MINGW defines this macro +#ifndef _WIN32 +# define _C2 SYSTEM_RESERVED_NAME +#endif + +// Test that libc++ doesn't use names that collide with Win32 API macros. +// Obviously we can only define these on non-Windows platforms. 
+#ifndef _WIN32 +# define __allocator SYSTEM_RESERVED_NAME +# define __bound SYSTEM_RESERVED_NAME +# define __deallocate SYSTEM_RESERVED_NAME +# define __deref SYSTEM_RESERVED_NAME +# define __format_string SYSTEM_RESERVED_NAME +# define __full SYSTEM_RESERVED_NAME +# define __in SYSTEM_RESERVED_NAME +# define __inout SYSTEM_RESERVED_NAME +# define __nz SYSTEM_RESERVED_NAME +# define __out SYSTEM_RESERVED_NAME +# define __part SYSTEM_RESERVED_NAME +# define __post SYSTEM_RESERVED_NAME +# define __pre SYSTEM_RESERVED_NAME +#endif + +// Newlib & picolibc use __input as a parameter name of a64l & l64a +#ifndef _NEWLIB_VERSION +# define __input SYSTEM_RESERVED_NAME +#endif +#define __output SYSTEM_RESERVED_NAME + +#define __acquire SYSTEM_RESERVED_NAME +#define __release SYSTEM_RESERVED_NAME + +// Android and FreeBSD use this for __attribute__((__unused__)) +#if !defined(__FreeBSD__) && !defined(__ANDROID__) +#define __unused SYSTEM_RESERVED_NAME +#endif + +// These names are not reserved, so the user can macro-define them. +// These are intended to find improperly _Uglified template parameters. +#define A SYSTEM_RESERVED_NAME +#define Arg SYSTEM_RESERVED_NAME +#define Args SYSTEM_RESERVED_NAME +#define As SYSTEM_RESERVED_NAME +#define B SYSTEM_RESERVED_NAME +#define Bs SYSTEM_RESERVED_NAME +#define C SYSTEM_RESERVED_NAME +#define Cp SYSTEM_RESERVED_NAME +#define Cs SYSTEM_RESERVED_NAME +// Windows setjmp.h contains a struct member named 'D' on ARM/AArch64. +#ifndef _WIN32 +# define D SYSTEM_RESERVED_NAME +#endif +#define Dp SYSTEM_RESERVED_NAME +#define Ds SYSTEM_RESERVED_NAME +#define E SYSTEM_RESERVED_NAME +#define Ep SYSTEM_RESERVED_NAME +#define Es SYSTEM_RESERVED_NAME +#define N SYSTEM_RESERVED_NAME +#define Np SYSTEM_RESERVED_NAME +#define Ns SYSTEM_RESERVED_NAME +#define R SYSTEM_RESERVED_NAME +#define Rp SYSTEM_RESERVED_NAME +#define Rs SYSTEM_RESERVED_NAME +#define T SYSTEM_RESERVED_NAME +#define Tp SYSTEM_RESERVED_NAME +#define Ts SYSTEM_RESERVED_NAME +#define Type SYSTEM_RESERVED_NAME +#define Types SYSTEM_RESERVED_NAME +#define U SYSTEM_RESERVED_NAME +#define Up SYSTEM_RESERVED_NAME +#define Us SYSTEM_RESERVED_NAME +#define V SYSTEM_RESERVED_NAME +#define Vp SYSTEM_RESERVED_NAME +#define Vs SYSTEM_RESERVED_NAME +#define X SYSTEM_RESERVED_NAME +#define Xp SYSTEM_RESERVED_NAME +#define Xs SYSTEM_RESERVED_NAME + +// The classic Windows min/max macros +#define min SYSTEM_RESERVED_NAME +#define max SYSTEM_RESERVED_NAME + +// Test to make sure curses has no conflicting macros with the standard library +#define move SYSTEM_RESERVED_NAME +#define erase SYSTEM_RESERVED_NAME +#define refresh SYSTEM_RESERVED_NAME + +// Dinkumware libc ctype.h uses these definitions +#define _XA SYSTEM_RESERVED_NAME +#define _XS SYSTEM_RESERVED_NAME +#define _BB SYSTEM_RESERVED_NAME +#define _CN SYSTEM_RESERVED_NAME +#define _DI SYSTEM_RESERVED_NAME +#define _LO SYSTEM_RESERVED_NAME +#define _PU SYSTEM_RESERVED_NAME +#define _SP SYSTEM_RESERVED_NAME +#define _UP SYSTEM_RESERVED_NAME +#define _XD SYSTEM_RESERVED_NAME diff --git a/clang/test/Headers/x86-intrinsics-headers-clean.cpp b/clang/test/Headers/x86-intrinsics-headers-clean.cpp index a19207f..0a04bce 100644 --- a/clang/test/Headers/x86-intrinsics-headers-clean.cpp +++ b/clang/test/Headers/x86-intrinsics-headers-clean.cpp @@ -10,4 +10,6 @@ // expected-no-diagnostics +#include "system_reserved_names.h" + #include <x86intrin.h> diff --git a/clang/test/Headers/x86-intrinsics-headers.c b/clang/test/Headers/x86-intrinsics-headers.c index dc06cbd..89a7d4d 
100644 --- a/clang/test/Headers/x86-intrinsics-headers.c +++ b/clang/test/Headers/x86-intrinsics-headers.c @@ -5,6 +5,8 @@ // XFAIL: target=arm64ec-pc-windows-msvc // These intrinsics are not yet implemented for Arm64EC. +#include "system_reserved_names.h" + #if defined(i386) || defined(__x86_64__) #ifdef __SSE4_2__ diff --git a/libcxx/test/std/experimental/simd/simd.class/simd_unary.pass.cpp b/libcxx/test/std/experimental/simd/simd.class/simd_unary.pass.cpp index 2c3751a..874c6e1 100644 --- a/libcxx/test/std/experimental/simd/simd.class/simd_unary.pass.cpp +++ b/libcxx/test/std/experimental/simd/simd.class/simd_unary.pass.cpp @@ -14,6 +14,7 @@ // FIXME: This should work with -flax-vector-conversions=none // ADDITIONAL_COMPILE_FLAGS(clang): -flax-vector-conversions=integer +// ADDITIONAL_COMPILE_FLAGS(apple-clang): -flax-vector-conversions=integer // <experimental/simd> // diff --git a/llvm/docs/HowToReleaseLLVM.rst b/llvm/docs/HowToReleaseLLVM.rst index f3792e3..1795d3a 100644 --- a/llvm/docs/HowToReleaseLLVM.rst +++ b/llvm/docs/HowToReleaseLLVM.rst @@ -116,13 +116,11 @@ Branch the Git trunk using the following procedure: #. Bump the version in trunk to N.0.0git with the script in ``llvm/utils/release/bump-version.py``, and tag the commit with llvmorg-N-init. - If ``X`` is the version to be released, then ``N`` is ``X + 1``. + If ``X`` is the version to be released, then ``N`` is ``X + 1``. :: -:: - - $ git tag -sa llvmorg-N-init + $ git tag -sa llvmorg-N-init -4. Clear the release notes in trunk with the script in +#. Clear the release notes in trunk with the script in ``llvm/utils/release/clear-release-notes.py``. #. Create the release branch from the last known good revision from before the @@ -145,10 +143,12 @@ Tag release candidates: $ git tag -sa llvmorg-X.Y.Z-rcN The pre-packaged source tarballs will be automatically generated via the -"Release Sources" workflow on GitHub. This workflow will create an artifact -containing all the release tarballs and the artifact attestation. The -Release Manager should download the artifact, verify the tarballs, sign them, -and then upload them to the release page. +`Release Sources +<https://github.com/llvm/llvm-project/actions/workflows/release-sources.yml>`_ +workflow on GitHub. This workflow will create an artifact containing all the +release tarballs and the artifact attestation. The Release Manager should +download the artifact, verify the tarballs, sign them, and then upload them to +the release page. :: @@ -217,8 +217,9 @@ consistently validated and released binaries for their targets/OSs. To contact them, you should post on the `Discourse forums (Project Infrastructure - Release Testers). <https://discourse.llvm.org/c/infrastructure/release-testers/66>`_ -The official testers list is in the file ``RELEASE_TESTERS.TXT``, in the ``LLVM`` -repository. +The official testers list is in the file `RELEASE_TESTERS.TXT +<https://github.com/llvm/llvm-project/blob/main/llvm/RELEASE_TESTERS.TXT>`_, in +the LLVM repository. Community Testing ----------------- @@ -276,7 +277,8 @@ from the Milestone. Debugging can continue, but on trunk. Backport Requests ----------------- -Instructions for requesting a backport to a stable branch can be found :doc:`here <GitHub>`. +Instructions for requesting a backport to a stable branch can be found +:ref:`here <backporting>`. 
Triaging Bug Reports for Releases --------------------------------- @@ -301,26 +303,19 @@ This section describes how to triage bug reports: using the /cherry-pick or /branch comments if this has not been done already. #. If a bug has been fixed and has a pull request created for backporting it, - then update its status to "Needs Review" and notify a knowledgeable reviewer. - Usually you will want to notify the person who approved the patch in Phabricator, - but you may use your best judgement on who a good reviewer would be. Once - you have identified the reviewer(s), assign the issue to them and mention - them (i.e @username) in a comment and ask them if the patch is safe to backport. - You should also review the bug yourself to ensure that it meets the requirements - for committing to the release branch. + then update its status to "Needs Review" and notify a knowledgeable + reviewer. Usually you will want to notify the person who approved the + patch, but you may use your best judgement on who a good reviewer would be. + Once you have identified the reviewer(s), assign the issue to them and + mention them (i.e., @username) in a comment and ask them if the patch is safe + to backport. You should also review the bug yourself to ensure that it + meets the requirements for committing to the release branch. #. Once a bug has been reviewed, add the release:reviewed label and update the issue's status to "Needs Merge". Check the pull request associated with the issue. If all the tests pass, then the pull request can be merged. If not, then add a comment on the issue asking someone to take a look at the failures. -#. Once the pull request has been merged push it to the official release branch - with the script ``llvm/utils/git/sync-release-repo.sh``. - - Then add a comment to the issue stating that the fix has been merged along with - the git hashes from the release branch. Add the release:merged label to the issue - and close it. - Release Patch Rules ------------------- @@ -364,9 +359,8 @@ Update Documentation Review the documentation in the release branch and ensure that it is up to date. The "Release Notes" must be updated to reflect new features, bug fixes, new known issues, and changes in the list of supported platforms. -The "Getting Started Guide" should be updated to reflect the new release -version number tag available from Subversion and changes in basic system -requirements. +The :doc:`GettingStarted` page should be updated to reflect the new release +version number tag and changes in basic system requirements. .. _tag: @@ -386,7 +380,8 @@ Update the LLVM Website The website must be updated before the release announcement is sent out. Here is what to do: -#. Check out the ``www-releases`` module from GitHub. +#. Check out the `www-releases <https://github.com/llvm/www-releases>`_ repo + from GitHub. #. Create a new sub-directory ``X.Y.Z`` in the releases directory. 
diff --git a/llvm/include/llvm/IR/IntrinsicsSPIRV.td b/llvm/include/llvm/IR/IntrinsicsSPIRV.td index 823c491..66e24fa 100644 --- a/llvm/include/llvm/IR/IntrinsicsSPIRV.td +++ b/llvm/include/llvm/IR/IntrinsicsSPIRV.td @@ -150,6 +150,14 @@ def int_spv_rsqrt : DefaultAttrsIntrinsic<[LLVMMatchType<0>], [llvm_anyfloat_ty] [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [IntrNoMem]>; + def int_spv_resource_counterhandlefromimplicitbinding + : DefaultAttrsIntrinsic<[llvm_any_ty], + [llvm_any_ty, llvm_i32_ty, llvm_i32_ty], + [IntrNoMem]>; + def int_spv_resource_counterhandlefrombinding + : DefaultAttrsIntrinsic<[llvm_any_ty], + [llvm_any_ty, llvm_i32_ty, llvm_i32_ty], + [IntrNoMem]>; def int_spv_firstbituhigh : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>], [llvm_anyint_ty], [IntrNoMem]>; def int_spv_firstbitshigh : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>], [llvm_anyint_ty], [IntrNoMem]>; diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp index b98f797..c76689f 100644 --- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp @@ -2878,7 +2878,7 @@ static SVEStackSizes determineSVEStackSizes(MachineFunction &MF, StackTop += MFI.getObjectSize(FI); StackTop = alignTo(StackTop, Alignment); - assert(StackTop < std::numeric_limits<int64_t>::max() && + assert(StackTop < (uint64_t)std::numeric_limits<int64_t>::max() && "SVE StackTop far too large?!"); int64_t Offset = -int64_t(StackTop); diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp index 0afec42..989950f 100644 --- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp @@ -307,6 +307,10 @@ private: bool selectHandleFromBinding(Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const; + bool selectCounterHandleFromBinding(Register &ResVReg, + const SPIRVType *ResType, + MachineInstr &I) const; + bool selectReadImageIntrinsic(Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const; bool selectImageWriteIntrinsic(MachineInstr &I) const; @@ -314,6 +318,8 @@ private: MachineInstr &I) const; bool selectModf(Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const; + bool selectUpdateCounter(Register &ResVReg, const SPIRVType *ResType, + MachineInstr &I) const; bool selectFrexp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const; // Utilities @@ -3443,6 +3449,10 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg, case Intrinsic::spv_resource_handlefrombinding: { return selectHandleFromBinding(ResVReg, ResType, I); } + case Intrinsic::spv_resource_counterhandlefrombinding: + return selectCounterHandleFromBinding(ResVReg, ResType, I); + case Intrinsic::spv_resource_updatecounter: + return selectUpdateCounter(ResVReg, ResType, I); case Intrinsic::spv_resource_store_typedbuffer: { return selectImageWriteIntrinsic(I); } @@ -3478,6 +3488,130 @@ bool SPIRVInstructionSelector::selectHandleFromBinding(Register &ResVReg, *cast<GIntrinsic>(&I), I); } +bool SPIRVInstructionSelector::selectCounterHandleFromBinding( + Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const { + auto &Intr = cast<GIntrinsic>(I); + assert(Intr.getIntrinsicID() == + Intrinsic::spv_resource_counterhandlefrombinding); + + // Extract information from the intrinsic call. 
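+  // Operand layout of the G_INTRINSIC: operand 0 is the result vreg, operand
+  // 1 the intrinsic ID, operand 2 the main resource handle, operand 3 the
+  // binding, and operand 4 the descriptor set.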
+ Register MainHandleReg = Intr.getOperand(2).getReg(); + auto *MainHandleDef = cast<GIntrinsic>(getVRegDef(*MRI, MainHandleReg)); + assert(MainHandleDef->getIntrinsicID() == + Intrinsic::spv_resource_handlefrombinding); + + uint32_t Set = getIConstVal(Intr.getOperand(4).getReg(), MRI); + uint32_t Binding = getIConstVal(Intr.getOperand(3).getReg(), MRI); + uint32_t ArraySize = getIConstVal(MainHandleDef->getOperand(4).getReg(), MRI); + Register IndexReg = MainHandleDef->getOperand(5).getReg(); + const bool IsNonUniform = false; + std::string CounterName = + getStringValueFromReg(MainHandleDef->getOperand(6).getReg(), *MRI) + + ".counter"; + + // Create the counter variable. + MachineIRBuilder MIRBuilder(I); + Register CounterVarReg = buildPointerToResource( + GR.getPointeeType(ResType), GR.getPointerStorageClass(ResType), Set, + Binding, ArraySize, IndexReg, IsNonUniform, CounterName, MIRBuilder); + + return BuildCOPY(ResVReg, CounterVarReg, I); +} + +bool SPIRVInstructionSelector::selectUpdateCounter(Register &ResVReg, + const SPIRVType *ResType, + MachineInstr &I) const { + auto &Intr = cast<GIntrinsic>(I); + assert(Intr.getIntrinsicID() == Intrinsic::spv_resource_updatecounter); + + Register CounterHandleReg = Intr.getOperand(2).getReg(); + Register IncrReg = Intr.getOperand(3).getReg(); + + // The counter handle is a pointer to the counter variable (which is a struct + // containing an i32). We need to get a pointer to that i32 member to do the + // atomic operation. +#ifndef NDEBUG + SPIRVType *CounterVarType = GR.getSPIRVTypeForVReg(CounterHandleReg); + SPIRVType *CounterVarPointeeType = GR.getPointeeType(CounterVarType); + assert(CounterVarPointeeType && + CounterVarPointeeType->getOpcode() == SPIRV::OpTypeStruct && + "Counter variable must be a struct"); + assert(GR.getPointerStorageClass(CounterVarType) == + SPIRV::StorageClass::StorageBuffer && + "Counter variable must be in the storage buffer storage class"); + assert(CounterVarPointeeType->getNumOperands() == 2 && + "Counter variable must have exactly 1 member in the struct"); + const SPIRVType *MemberType = + GR.getSPIRVTypeForVReg(CounterVarPointeeType->getOperand(1).getReg()); + assert(MemberType->getOpcode() == SPIRV::OpTypeInt && + "Counter variable struct must have a single i32 member"); +#endif + + // The struct has a single i32 member. + MachineIRBuilder MIRBuilder(I); + const Type *LLVMIntType = + Type::getInt32Ty(I.getMF()->getFunction().getContext()); + + SPIRVType *IntPtrType = GR.getOrCreateSPIRVPointerType( + LLVMIntType, MIRBuilder, SPIRV::StorageClass::StorageBuffer); + + auto Zero = buildI32Constant(0, I); + if (!Zero.second) + return false; + + Register PtrToCounter = + MRI->createVirtualRegister(GR.getRegClass(IntPtrType)); + if (!BuildMI(*I.getParent(), I, I.getDebugLoc(), + TII.get(SPIRV::OpAccessChain)) + .addDef(PtrToCounter) + .addUse(GR.getSPIRVTypeID(IntPtrType)) + .addUse(CounterHandleReg) + .addUse(Zero.first) + .constrainAllUses(TII, TRI, RBI)) { + return false; + } + + // For UAV/SSBO counters, the scope is Device. The counter variable is not + // used as a flag. So the memory semantics can be None. 
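+  // OpAtomicIAdd takes id operands for the pointer, the memory scope, the
+  // memory semantics, and the value to add, so materialize the scope and
+  // semantics as i32 constants first.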
+ auto Scope = buildI32Constant(SPIRV::Scope::Device, I); + if (!Scope.second) + return false; + auto Semantics = buildI32Constant(SPIRV::MemorySemantics::None, I); + if (!Semantics.second) + return false; + + int64_t IncrVal = getIConstValSext(IncrReg, MRI); + auto Incr = buildI32Constant(static_cast<uint32_t>(IncrVal), I); + if (!Incr.second) + return false; + + Register AtomicRes = MRI->createVirtualRegister(GR.getRegClass(ResType)); + if (!BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpAtomicIAdd)) + .addDef(AtomicRes) + .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(PtrToCounter) + .addUse(Scope.first) + .addUse(Semantics.first) + .addUse(Incr.first) + .constrainAllUses(TII, TRI, RBI)) { + return false; + } + if (IncrVal >= 0) { + return BuildCOPY(ResVReg, AtomicRes, I); + } + + // In HLSL, IncrementCounter returns the value *before* the increment, while + // DecrementCounter returns the value *after* the decrement. Both are lowered + // to the same atomic intrinsic which returns the value *before* the + // operation. So for decrements (negative IncrVal), we must add the negative + // increment to the result to get the post-decrement value. + return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS)) + .addDef(ResVReg) + .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(AtomicRes) + .addUse(Incr.first) + .constrainAllUses(TII, TRI, RBI); +} bool SPIRVInstructionSelector::selectReadImageIntrinsic( Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const { diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizeImplicitBinding.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizeImplicitBinding.cpp index 205895e..fc14a03 100644 --- a/llvm/lib/Target/SPIRV/SPIRVLegalizeImplicitBinding.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVLegalizeImplicitBinding.cpp @@ -39,6 +39,10 @@ private: void collectBindingInfo(Module &M); uint32_t getAndReserveFirstUnusedBinding(uint32_t DescSet); void replaceImplicitBindingCalls(Module &M); + void replaceResourceHandleCall(Module &M, CallInst *OldCI, + uint32_t NewBinding); + void replaceCounterHandleCall(Module &M, CallInst *OldCI, + uint32_t NewBinding); + void verifyUniqueOrderIdPerResource(SmallVectorImpl<CallInst *> &Calls); // A map from descriptor set to a bit vector of used binding numbers. 
@@ -56,64 +60,93 @@ struct BindingInfoCollector : public InstVisitor<BindingInfoCollector> { : UsedBindings(UsedBindings), ImplicitBindingCalls(ImplicitBindingCalls) { } + void addBinding(uint32_t DescSet, uint32_t Binding) { + if (UsedBindings.size() <= DescSet) { + UsedBindings.resize(DescSet + 1); + UsedBindings[DescSet].resize(64); + } + if (UsedBindings[DescSet].size() <= Binding) { + UsedBindings[DescSet].resize(2 * Binding + 1); + } + UsedBindings[DescSet].set(Binding); + } + void visitCallInst(CallInst &CI) { if (CI.getIntrinsicID() == Intrinsic::spv_resource_handlefrombinding) { const uint32_t DescSet = cast<ConstantInt>(CI.getArgOperand(0))->getZExtValue(); const uint32_t Binding = cast<ConstantInt>(CI.getArgOperand(1))->getZExtValue(); - - if (UsedBindings.size() <= DescSet) { - UsedBindings.resize(DescSet + 1); - UsedBindings[DescSet].resize(64); - } - if (UsedBindings[DescSet].size() <= Binding) { - UsedBindings[DescSet].resize(2 * Binding + 1); - } - UsedBindings[DescSet].set(Binding); + addBinding(DescSet, Binding); } else if (CI.getIntrinsicID() == Intrinsic::spv_resource_handlefromimplicitbinding) { ImplicitBindingCalls.push_back(&CI); + } else if (CI.getIntrinsicID() == + Intrinsic::spv_resource_counterhandlefrombinding) { + const uint32_t DescSet = + cast<ConstantInt>(CI.getArgOperand(2))->getZExtValue(); + const uint32_t Binding = + cast<ConstantInt>(CI.getArgOperand(1))->getZExtValue(); + addBinding(DescSet, Binding); + } else if (CI.getIntrinsicID() == + Intrinsic::spv_resource_counterhandlefromimplicitbinding) { + ImplicitBindingCalls.push_back(&CI); } } }; +static uint32_t getOrderId(const CallInst *CI) { + uint32_t OrderIdArgIdx = 0; + switch (CI->getIntrinsicID()) { + case Intrinsic::spv_resource_handlefromimplicitbinding: + OrderIdArgIdx = 0; + break; + case Intrinsic::spv_resource_counterhandlefromimplicitbinding: + OrderIdArgIdx = 1; + break; + default: + llvm_unreachable("CallInst is not an implicit binding intrinsic"); + } + return cast<ConstantInt>(CI->getArgOperand(OrderIdArgIdx))->getZExtValue(); +} + +static uint32_t getDescSet(const CallInst *CI) { + uint32_t DescSetArgIdx; + switch (CI->getIntrinsicID()) { + case Intrinsic::spv_resource_handlefromimplicitbinding: + case Intrinsic::spv_resource_handlefrombinding: + DescSetArgIdx = 1; + break; + case Intrinsic::spv_resource_counterhandlefromimplicitbinding: + case Intrinsic::spv_resource_counterhandlefrombinding: + DescSetArgIdx = 2; + break; + default: + llvm_unreachable("CallInst is not an implicit binding intrinsic"); + } + return cast<ConstantInt>(CI->getArgOperand(DescSetArgIdx))->getZExtValue(); +} + void SPIRVLegalizeImplicitBinding::collectBindingInfo(Module &M) { BindingInfoCollector InfoCollector(UsedBindings, ImplicitBindingCalls); InfoCollector.visit(M); // Sort the collected calls by their order ID. - std::sort( - ImplicitBindingCalls.begin(), ImplicitBindingCalls.end(), - [](const CallInst *A, const CallInst *B) { - const uint32_t OrderIdArgIdx = 0; - const uint32_t OrderA = - cast<ConstantInt>(A->getArgOperand(OrderIdArgIdx))->getZExtValue(); - const uint32_t OrderB = - cast<ConstantInt>(B->getArgOperand(OrderIdArgIdx))->getZExtValue(); - return OrderA < OrderB; - }); + std::sort(ImplicitBindingCalls.begin(), ImplicitBindingCalls.end(), + [](const CallInst *A, const CallInst *B) { + return getOrderId(A) < getOrderId(B); + }); } void SPIRVLegalizeImplicitBinding::verifyUniqueOrderIdPerResource( SmallVectorImpl<CallInst *> &Calls) { // Check that the order Id is unique per resource. 
for (uint32_t i = 1; i < Calls.size(); ++i) { - const uint32_t OrderIdArgIdx = 0; - const uint32_t DescSetArgIdx = 1; - const uint32_t OrderA = - cast<ConstantInt>(Calls[i - 1]->getArgOperand(OrderIdArgIdx)) - ->getZExtValue(); - const uint32_t OrderB = - cast<ConstantInt>(Calls[i]->getArgOperand(OrderIdArgIdx)) - ->getZExtValue(); + const uint32_t OrderA = getOrderId(Calls[i - 1]); + const uint32_t OrderB = getOrderId(Calls[i]); if (OrderA == OrderB) { - const uint32_t DescSetA = - cast<ConstantInt>(Calls[i - 1]->getArgOperand(DescSetArgIdx)) - ->getZExtValue(); - const uint32_t DescSetB = - cast<ConstantInt>(Calls[i]->getArgOperand(DescSetArgIdx)) - ->getZExtValue(); + const uint32_t DescSetA = getDescSet(Calls[i - 1]); + const uint32_t DescSetB = getDescSet(Calls[i]); if (DescSetA != DescSetB) { report_fatal_error("Implicit binding calls with the same order ID must " "have the same descriptor set"); @@ -144,36 +177,26 @@ void SPIRVLegalizeImplicitBinding::replaceImplicitBindingCalls(Module &M) { uint32_t lastBindingNumber = -1; for (CallInst *OldCI : ImplicitBindingCalls) { - IRBuilder<> Builder(OldCI); - const uint32_t OrderId = - cast<ConstantInt>(OldCI->getArgOperand(0))->getZExtValue(); - const uint32_t DescSet = - cast<ConstantInt>(OldCI->getArgOperand(1))->getZExtValue(); - - // Reuse an existing binding for this order ID, if one was already assigned. - // Otherwise, assign a new binding. - const uint32_t NewBinding = (lastOrderId == OrderId) - ? lastBindingNumber - : getAndReserveFirstUnusedBinding(DescSet); - lastOrderId = OrderId; - lastBindingNumber = NewBinding; - - SmallVector<Value *, 8> Args; - Args.push_back(Builder.getInt32(DescSet)); - Args.push_back(Builder.getInt32(NewBinding)); - - // Copy the remaining arguments from the old call. - for (uint32_t i = 2; i < OldCI->arg_size(); ++i) { - Args.push_back(OldCI->getArgOperand(i)); + const uint32_t OrderId = getOrderId(OldCI); + uint32_t BindingNumber; + if (OrderId == lastOrderId) { + BindingNumber = lastBindingNumber; + } else { + const uint32_t DescSet = getDescSet(OldCI); + BindingNumber = getAndReserveFirstUnusedBinding(DescSet); } - Function *NewFunc = Intrinsic::getOrInsertDeclaration( - &M, Intrinsic::spv_resource_handlefrombinding, OldCI->getType()); - CallInst *NewCI = Builder.CreateCall(NewFunc, Args); - NewCI->setCallingConv(OldCI->getCallingConv()); - - OldCI->replaceAllUsesWith(NewCI); - OldCI->eraseFromParent(); + if (OldCI->getIntrinsicID() == + Intrinsic::spv_resource_handlefromimplicitbinding) { + replaceResourceHandleCall(M, OldCI, BindingNumber); + } else { + assert(OldCI->getIntrinsicID() == + Intrinsic::spv_resource_counterhandlefromimplicitbinding && + "Unexpected implicit binding intrinsic"); + replaceCounterHandleCall(M, OldCI, BindingNumber); + } + lastOrderId = OrderId; + lastBindingNumber = BindingNumber; } } @@ -196,4 +219,49 @@ INITIALIZE_PASS(SPIRVLegalizeImplicitBinding, "legalize-spirv-implicit-binding", ModulePass *llvm::createSPIRVLegalizeImplicitBindingPass() { return new SPIRVLegalizeImplicitBinding(); -}
\ No newline at end of file +} + +void SPIRVLegalizeImplicitBinding::replaceResourceHandleCall( + Module &M, CallInst *OldCI, uint32_t NewBinding) { + IRBuilder<> Builder(OldCI); + const uint32_t DescSet = + cast<ConstantInt>(OldCI->getArgOperand(1))->getZExtValue(); + + SmallVector<Value *, 8> Args; + Args.push_back(Builder.getInt32(DescSet)); + Args.push_back(Builder.getInt32(NewBinding)); + + // Copy the remaining arguments from the old call. + for (uint32_t i = 2; i < OldCI->arg_size(); ++i) { + Args.push_back(OldCI->getArgOperand(i)); + } + + Function *NewFunc = Intrinsic::getOrInsertDeclaration( + &M, Intrinsic::spv_resource_handlefrombinding, OldCI->getType()); + CallInst *NewCI = Builder.CreateCall(NewFunc, Args); + NewCI->setCallingConv(OldCI->getCallingConv()); + + OldCI->replaceAllUsesWith(NewCI); + OldCI->eraseFromParent(); +} + +void SPIRVLegalizeImplicitBinding::replaceCounterHandleCall( + Module &M, CallInst *OldCI, uint32_t NewBinding) { + IRBuilder<> Builder(OldCI); + const uint32_t DescSet = + cast<ConstantInt>(OldCI->getArgOperand(2))->getZExtValue(); + + SmallVector<Value *, 8> Args; + Args.push_back(OldCI->getArgOperand(0)); + Args.push_back(Builder.getInt32(NewBinding)); + Args.push_back(Builder.getInt32(DescSet)); + + Type *Tys[] = {OldCI->getType(), OldCI->getArgOperand(0)->getType()}; + Function *NewFunc = Intrinsic::getOrInsertDeclaration( + &M, Intrinsic::spv_resource_counterhandlefrombinding, Tys); + CallInst *NewCI = Builder.CreateCall(NewFunc, Args); + NewCI->setCallingConv(OldCI->getCallingConv()); + + OldCI->replaceAllUsesWith(NewCI); + OldCI->eraseFromParent(); +} diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp index 327c011..1d47c89 100644 --- a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp @@ -385,6 +385,12 @@ uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI) { return MI->getOperand(1).getCImm()->getValue().getZExtValue(); } +int64_t getIConstValSext(Register ConstReg, const MachineRegisterInfo *MRI) { + const MachineInstr *MI = getDefInstrMaybeConstant(ConstReg, MRI); + assert(MI && MI->getOpcode() == TargetOpcode::G_CONSTANT); + return MI->getOperand(1).getCImm()->getSExtValue(); +} + bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID) { if (const auto *GI = dyn_cast<GIntrinsic>(&MI)) return GI->is(IntrinsicID); diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.h b/llvm/lib/Target/SPIRV/SPIRVUtils.h index 409a0fd..5777a24 100644 --- a/llvm/lib/Target/SPIRV/SPIRVUtils.h +++ b/llvm/lib/Target/SPIRV/SPIRVUtils.h @@ -289,6 +289,9 @@ MachineInstr *getDefInstrMaybeConstant(Register &ConstReg, // Get constant integer value of the given ConstReg. uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI); +// Get constant integer value of the given ConstReg, sign-extended. +int64_t getIConstValSext(Register ConstReg, const MachineRegisterInfo *MRI); + // Check if MI is a SPIR-V specific intrinsic call. bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID); // Check if it's a SPIR-V specific intrinsic call. 
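The sign-extending helper matters because spv_resource_updatecounter carries its delta as an i8: HLSL's DecrementCounter lowers to a delta of i8 -1 (see the test_counters.ll test below), and reading that constant with the zero-extending getIConstVal would yield 255, so the IncrVal >= 0 check in selectUpdateCounter would never take the post-decrement fixup path. A minimal standalone sketch of the difference, using plain llvm::APInt (illustration only, not part of the patch):

  #include "llvm/ADT/APInt.h"
  #include <cassert>

  int main() {
    // An i8 constant -1 is stored as the bit pattern 0xFF.
    llvm::APInt Delta(/*numBits=*/8, /*val=*/static_cast<uint64_t>(-1));
    assert(Delta.getZExtValue() == 255); // zero-extended view (getIConstVal)
    assert(Delta.getSExtValue() == -1);  // sign-extended view (getIConstValSext)
    return 0;
  }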
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 56a3d6d..e434e73 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -8201,211 +8201,6 @@ void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, } } -/// Create and return a ResumePhi for \p WideIV, unless it is truncated. If the -/// induction recipe is not canonical, creates a VPDerivedIVRecipe to compute -/// the end value of the induction. -static VPInstruction *addResumePhiRecipeForInduction( - VPWidenInductionRecipe *WideIV, VPBuilder &VectorPHBuilder, - VPBuilder &ScalarPHBuilder, VPTypeAnalysis &TypeInfo, VPValue *VectorTC) { - auto *WideIntOrFp = dyn_cast<VPWidenIntOrFpInductionRecipe>(WideIV); - // Truncated wide inductions resume from the last lane of their vector value - // in the last vector iteration which is handled elsewhere. - if (WideIntOrFp && WideIntOrFp->getTruncInst()) - return nullptr; - - VPValue *Start = WideIV->getStartValue(); - VPValue *Step = WideIV->getStepValue(); - const InductionDescriptor &ID = WideIV->getInductionDescriptor(); - VPValue *EndValue = VectorTC; - if (!WideIntOrFp || !WideIntOrFp->isCanonical()) { - EndValue = VectorPHBuilder.createDerivedIV( - ID.getKind(), dyn_cast_or_null<FPMathOperator>(ID.getInductionBinOp()), - Start, VectorTC, Step); - } - - // EndValue is derived from the vector trip count (which has the same type as - // the widest induction) and thus may be wider than the induction here. - Type *ScalarTypeOfWideIV = TypeInfo.inferScalarType(WideIV); - if (ScalarTypeOfWideIV != TypeInfo.inferScalarType(EndValue)) { - EndValue = VectorPHBuilder.createScalarCast(Instruction::Trunc, EndValue, - ScalarTypeOfWideIV, - WideIV->getDebugLoc()); - } - - auto *ResumePhiRecipe = ScalarPHBuilder.createScalarPhi( - {EndValue, Start}, WideIV->getDebugLoc(), "bc.resume.val"); - return ResumePhiRecipe; -} - -/// Create resume phis in the scalar preheader for first-order recurrences, -/// reductions and inductions, and update the VPIRInstructions wrapping the -/// original phis in the scalar header. End values for inductions are added to -/// \p IVEndValues. -static void addScalarResumePhis(VPRecipeBuilder &Builder, VPlan &Plan, - DenseMap<VPValue *, VPValue *> &IVEndValues) { - VPTypeAnalysis TypeInfo(Plan); - auto *ScalarPH = Plan.getScalarPreheader(); - auto *MiddleVPBB = cast<VPBasicBlock>(ScalarPH->getPredecessors()[0]); - VPRegionBlock *VectorRegion = Plan.getVectorLoopRegion(); - VPBuilder VectorPHBuilder( - cast<VPBasicBlock>(VectorRegion->getSinglePredecessor())); - VPBuilder MiddleBuilder(MiddleVPBB, MiddleVPBB->getFirstNonPhi()); - VPBuilder ScalarPHBuilder(ScalarPH); - for (VPRecipeBase &ScalarPhiR : Plan.getScalarHeader()->phis()) { - auto *ScalarPhiIRI = cast<VPIRPhi>(&ScalarPhiR); - - // TODO: Extract final value from induction recipe initially, optimize to - // pre-computed end value together in optimizeInductionExitUsers. 
- auto *VectorPhiR = - cast<VPHeaderPHIRecipe>(Builder.getRecipe(&ScalarPhiIRI->getIRPhi())); - if (auto *WideIVR = dyn_cast<VPWidenInductionRecipe>(VectorPhiR)) { - if (VPInstruction *ResumePhi = addResumePhiRecipeForInduction( - WideIVR, VectorPHBuilder, ScalarPHBuilder, TypeInfo, - &Plan.getVectorTripCount())) { - assert(isa<VPPhi>(ResumePhi) && "Expected a phi"); - IVEndValues[WideIVR] = ResumePhi->getOperand(0); - ScalarPhiIRI->addOperand(ResumePhi); - continue; - } - // TODO: Also handle truncated inductions here. Computing end-values - // separately should be done as VPlan-to-VPlan optimization, after - // legalizing all resume values to use the last lane from the loop. - assert(cast<VPWidenIntOrFpInductionRecipe>(VectorPhiR)->getTruncInst() && - "should only skip truncated wide inductions"); - continue; - } - - // The backedge value provides the value to resume coming out of a loop, - // which for FORs is a vector whose last element needs to be extracted. The - // start value provides the value if the loop is bypassed. - bool IsFOR = isa<VPFirstOrderRecurrencePHIRecipe>(VectorPhiR); - auto *ResumeFromVectorLoop = VectorPhiR->getBackedgeValue(); - assert(VectorRegion->getSingleSuccessor() == Plan.getMiddleBlock() && - "Cannot handle loops with uncountable early exits"); - if (IsFOR) - ResumeFromVectorLoop = MiddleBuilder.createNaryOp( - VPInstruction::ExtractLastElement, {ResumeFromVectorLoop}, {}, - "vector.recur.extract"); - StringRef Name = IsFOR ? "scalar.recur.init" : "bc.merge.rdx"; - auto *ResumePhiR = ScalarPHBuilder.createScalarPhi( - {ResumeFromVectorLoop, VectorPhiR->getStartValue()}, {}, Name); - ScalarPhiIRI->addOperand(ResumePhiR); - } -} - -/// Handle users in the exit block for first order reductions in the original -/// exit block. The penultimate value of recurrences is fed to their LCSSA phi -/// users in the original exit block using the VPIRInstruction wrapping to the -/// LCSSA phi. -static void addExitUsersForFirstOrderRecurrences(VPlan &Plan, VFRange &Range) { - VPRegionBlock *VectorRegion = Plan.getVectorLoopRegion(); - auto *ScalarPHVPBB = Plan.getScalarPreheader(); - auto *MiddleVPBB = Plan.getMiddleBlock(); - VPBuilder ScalarPHBuilder(ScalarPHVPBB); - VPBuilder MiddleBuilder(MiddleVPBB, MiddleVPBB->getFirstNonPhi()); - - auto IsScalableOne = [](ElementCount VF) -> bool { - return VF == ElementCount::getScalable(1); - }; - - for (auto &HeaderPhi : VectorRegion->getEntryBasicBlock()->phis()) { - auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&HeaderPhi); - if (!FOR) - continue; - - assert(VectorRegion->getSingleSuccessor() == Plan.getMiddleBlock() && - "Cannot handle loops with uncountable early exits"); - - // This is the second phase of vectorizing first-order recurrences, creating - // extract for users outside the loop. An overview of the transformation is - // described below. Suppose we have the following loop with some use after - // the loop of the last a[i-1], - // - // for (int i = 0; i < n; ++i) { - // t = a[i - 1]; - // b[i] = a[i] - t; - // } - // use t; - // - // There is a first-order recurrence on "a". 
For this loop, the shorthand - // scalar IR looks like: - // - // scalar.ph: - // s.init = a[-1] - // br scalar.body - // - // scalar.body: - // i = phi [0, scalar.ph], [i+1, scalar.body] - // s1 = phi [s.init, scalar.ph], [s2, scalar.body] - // s2 = a[i] - // b[i] = s2 - s1 - // br cond, scalar.body, exit.block - // - // exit.block: - // use = lcssa.phi [s1, scalar.body] - // - // In this example, s1 is a recurrence because it's value depends on the - // previous iteration. In the first phase of vectorization, we created a - // VPFirstOrderRecurrencePHIRecipe v1 for s1. Now we create the extracts - // for users in the scalar preheader and exit block. - // - // vector.ph: - // v_init = vector(..., ..., ..., a[-1]) - // br vector.body - // - // vector.body - // i = phi [0, vector.ph], [i+4, vector.body] - // v1 = phi [v_init, vector.ph], [v2, vector.body] - // v2 = a[i, i+1, i+2, i+3] - // b[i] = v2 - v1 - // // Next, third phase will introduce v1' = splice(v1(3), v2(0, 1, 2)) - // b[i, i+1, i+2, i+3] = v2 - v1 - // br cond, vector.body, middle.block - // - // middle.block: - // vector.recur.extract.for.phi = v2(2) - // vector.recur.extract = v2(3) - // br cond, scalar.ph, exit.block - // - // scalar.ph: - // scalar.recur.init = phi [vector.recur.extract, middle.block], - // [s.init, otherwise] - // br scalar.body - // - // scalar.body: - // i = phi [0, scalar.ph], [i+1, scalar.body] - // s1 = phi [scalar.recur.init, scalar.ph], [s2, scalar.body] - // s2 = a[i] - // b[i] = s2 - s1 - // br cond, scalar.body, exit.block - // - // exit.block: - // lo = lcssa.phi [s1, scalar.body], - // [vector.recur.extract.for.phi, middle.block] - // - // Now update VPIRInstructions modeling LCSSA phis in the exit block. - // Extract the penultimate value of the recurrence and use it as operand for - // the VPIRInstruction modeling the phi. - for (VPUser *U : FOR->users()) { - using namespace llvm::VPlanPatternMatch; - if (!match(U, m_ExtractLastElement(m_Specific(FOR)))) - continue; - // For VF vscale x 1, if vscale = 1, we are unable to extract the - // penultimate value of the recurrence. Instead we rely on the existing - // extract of the last element from the result of - // VPInstruction::FirstOrderRecurrenceSplice. - // TODO: Consider vscale_range info and UF. 
- if (LoopVectorizationPlanner::getDecisionAndClampRange(IsScalableOne, - Range)) - return; - VPValue *PenultimateElement = MiddleBuilder.createNaryOp( - VPInstruction::ExtractPenultimateElement, {FOR->getBackedgeValue()}, - {}, "vector.recur.extract.for.phi"); - cast<VPInstruction>(U)->replaceAllUsesWith(PenultimateElement); - } - } -} - VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes( VPlanPtr Plan, VFRange &Range, LoopVersioning *LVer) { @@ -8598,9 +8393,11 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes( R->setOperand(1, WideIV->getStepValue()); } - addExitUsersForFirstOrderRecurrences(*Plan, Range); + VPlanTransforms::runPass( + VPlanTransforms::addExitUsersForFirstOrderRecurrences, *Plan, Range); DenseMap<VPValue *, VPValue *> IVEndValues; - addScalarResumePhis(RecipeBuilder, *Plan, IVEndValues); + VPlanTransforms::runPass(VPlanTransforms::addScalarResumePhis, *Plan, + RecipeBuilder, IVEndValues); // --------------------------------------------------------------------------- // Transform initial VPlan: Apply previously taken decisions, in order, to @@ -8711,7 +8508,8 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlan(VFRange &Range) { DenseMap<VPValue *, VPValue *> IVEndValues; // TODO: IVEndValues are not used yet in the native path, to optimize exit // values. - addScalarResumePhis(RecipeBuilder, *Plan, IVEndValues); + VPlanTransforms::runPass(VPlanTransforms::addScalarResumePhis, *Plan, + RecipeBuilder, IVEndValues); assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid"); return Plan; diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp index fedca65..91c3d42 100644 --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -10620,7 +10620,8 @@ class InstructionsCompatibilityAnalysis { /// Checks if the opcode is supported as the main opcode for copyable /// elements. static bool isSupportedOpcode(const unsigned Opcode) { - return Opcode == Instruction::Add || Opcode == Instruction::LShr; + return Opcode == Instruction::Add || Opcode == Instruction::LShr || + Opcode == Instruction::Shl; } /// Identifies the best candidate value, which represents main opcode @@ -10937,6 +10938,7 @@ public: switch (MainOpcode) { case Instruction::Add: case Instruction::LShr: + case Instruction::Shl: VectorCost = TTI.getArithmeticInstrCost(MainOpcode, VecTy, Kind); break; default: @@ -22006,6 +22008,8 @@ bool BoUpSLP::collectValuesToDemote( return all_of(E.Scalars, [&](Value *V) { if (isa<PoisonValue>(V)) return true; + if (E.isCopyableElement(V)) + return true; auto *I = cast<Instruction>(V); KnownBits AmtKnownBits = computeKnownBits(I->getOperand(1), *DL); return AmtKnownBits.getMaxValue().ult(BitWidth); diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index ca63bf3..ebf833e 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -4198,3 +4198,202 @@ void VPlanTransforms::addBranchWeightToMiddleTerminator( MDB.createBranchWeights({1, VectorStep - 1}, /*IsExpected=*/false); MiddleTerm->addMetadata(LLVMContext::MD_prof, BranchWeights); } + +/// Create and return a ResumePhi for \p WideIV, unless it is truncated. If the +/// induction recipe is not canonical, creates a VPDerivedIVRecipe to compute +/// the end value of the induction. 
+static VPInstruction *addResumePhiRecipeForInduction( + VPWidenInductionRecipe *WideIV, VPBuilder &VectorPHBuilder, + VPBuilder &ScalarPHBuilder, VPTypeAnalysis &TypeInfo, VPValue *VectorTC) { + auto *WideIntOrFp = dyn_cast<VPWidenIntOrFpInductionRecipe>(WideIV); + // Truncated wide inductions resume from the last lane of their vector value + // in the last vector iteration which is handled elsewhere. + if (WideIntOrFp && WideIntOrFp->getTruncInst()) + return nullptr; + + VPValue *Start = WideIV->getStartValue(); + VPValue *Step = WideIV->getStepValue(); + const InductionDescriptor &ID = WideIV->getInductionDescriptor(); + VPValue *EndValue = VectorTC; + if (!WideIntOrFp || !WideIntOrFp->isCanonical()) { + EndValue = VectorPHBuilder.createDerivedIV( + ID.getKind(), dyn_cast_or_null<FPMathOperator>(ID.getInductionBinOp()), + Start, VectorTC, Step); + } + + // EndValue is derived from the vector trip count (which has the same type as + // the widest induction) and thus may be wider than the induction here. + Type *ScalarTypeOfWideIV = TypeInfo.inferScalarType(WideIV); + if (ScalarTypeOfWideIV != TypeInfo.inferScalarType(EndValue)) { + EndValue = VectorPHBuilder.createScalarCast(Instruction::Trunc, EndValue, + ScalarTypeOfWideIV, + WideIV->getDebugLoc()); + } + + auto *ResumePhiRecipe = ScalarPHBuilder.createScalarPhi( + {EndValue, Start}, WideIV->getDebugLoc(), "bc.resume.val"); + return ResumePhiRecipe; +} + +void VPlanTransforms::addScalarResumePhis( + VPlan &Plan, VPRecipeBuilder &Builder, + DenseMap<VPValue *, VPValue *> &IVEndValues) { + VPTypeAnalysis TypeInfo(Plan); + auto *ScalarPH = Plan.getScalarPreheader(); + auto *MiddleVPBB = cast<VPBasicBlock>(ScalarPH->getPredecessors()[0]); + VPRegionBlock *VectorRegion = Plan.getVectorLoopRegion(); + VPBuilder VectorPHBuilder( + cast<VPBasicBlock>(VectorRegion->getSinglePredecessor())); + VPBuilder MiddleBuilder(MiddleVPBB, MiddleVPBB->getFirstNonPhi()); + VPBuilder ScalarPHBuilder(ScalarPH); + for (VPRecipeBase &ScalarPhiR : Plan.getScalarHeader()->phis()) { + auto *ScalarPhiIRI = cast<VPIRPhi>(&ScalarPhiR); + + // TODO: Extract final value from induction recipe initially, optimize to + // pre-computed end value together in optimizeInductionExitUsers. + auto *VectorPhiR = + cast<VPHeaderPHIRecipe>(Builder.getRecipe(&ScalarPhiIRI->getIRPhi())); + if (auto *WideIVR = dyn_cast<VPWidenInductionRecipe>(VectorPhiR)) { + if (VPInstruction *ResumePhi = addResumePhiRecipeForInduction( + WideIVR, VectorPHBuilder, ScalarPHBuilder, TypeInfo, + &Plan.getVectorTripCount())) { + assert(isa<VPPhi>(ResumePhi) && "Expected a phi"); + IVEndValues[WideIVR] = ResumePhi->getOperand(0); + ScalarPhiIRI->addOperand(ResumePhi); + continue; + } + // TODO: Also handle truncated inductions here. Computing end-values + // separately should be done as VPlan-to-VPlan optimization, after + // legalizing all resume values to use the last lane from the loop. + assert(cast<VPWidenIntOrFpInductionRecipe>(VectorPhiR)->getTruncInst() && + "should only skip truncated wide inductions"); + continue; + } + + // The backedge value provides the value to resume coming out of a loop, + // which for FORs is a vector whose last element needs to be extracted. The + // start value provides the value if the loop is bypassed. 
+ bool IsFOR = isa<VPFirstOrderRecurrencePHIRecipe>(VectorPhiR); + auto *ResumeFromVectorLoop = VectorPhiR->getBackedgeValue(); + assert(VectorRegion->getSingleSuccessor() == Plan.getMiddleBlock() && + "Cannot handle loops with uncountable early exits"); + if (IsFOR) + ResumeFromVectorLoop = MiddleBuilder.createNaryOp( + VPInstruction::ExtractLastElement, {ResumeFromVectorLoop}, {}, + "vector.recur.extract"); + StringRef Name = IsFOR ? "scalar.recur.init" : "bc.merge.rdx"; + auto *ResumePhiR = ScalarPHBuilder.createScalarPhi( + {ResumeFromVectorLoop, VectorPhiR->getStartValue()}, {}, Name); + ScalarPhiIRI->addOperand(ResumePhiR); + } +} + +void VPlanTransforms::addExitUsersForFirstOrderRecurrences(VPlan &Plan, + VFRange &Range) { + VPRegionBlock *VectorRegion = Plan.getVectorLoopRegion(); + auto *ScalarPHVPBB = Plan.getScalarPreheader(); + auto *MiddleVPBB = Plan.getMiddleBlock(); + VPBuilder ScalarPHBuilder(ScalarPHVPBB); + VPBuilder MiddleBuilder(MiddleVPBB, MiddleVPBB->getFirstNonPhi()); + + auto IsScalableOne = [](ElementCount VF) -> bool { + return VF == ElementCount::getScalable(1); + }; + + for (auto &HeaderPhi : VectorRegion->getEntryBasicBlock()->phis()) { + auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&HeaderPhi); + if (!FOR) + continue; + + assert(VectorRegion->getSingleSuccessor() == Plan.getMiddleBlock() && + "Cannot handle loops with uncountable early exits"); + + // This is the second phase of vectorizing first-order recurrences, creating + // extracts for users outside the loop. An overview of the transformation is + // described below. Suppose we have the following loop with some use after + // the loop of the last a[i-1], + // + // for (int i = 0; i < n; ++i) { + // t = a[i - 1]; + // b[i] = a[i] - t; + // } + // use t; + // + // There is a first-order recurrence on "a". For this loop, the shorthand + // scalar IR looks like: + // + // scalar.ph: + // s.init = a[-1] + // br scalar.body + // + // scalar.body: + // i = phi [0, scalar.ph], [i+1, scalar.body] + // s1 = phi [s.init, scalar.ph], [s2, scalar.body] + // s2 = a[i] + // b[i] = s2 - s1 + // br cond, scalar.body, exit.block + // + // exit.block: + // use = lcssa.phi [s1, scalar.body] + // + // In this example, s1 is a recurrence because its value depends on the + // previous iteration. In the first phase of vectorization, we created a + // VPFirstOrderRecurrencePHIRecipe v1 for s1. Now we create the extracts + // for users in the scalar preheader and exit block. 
+ // + // vector.ph: + // v_init = vector(..., ..., ..., a[-1]) + // br vector.body + // + // vector.body + // i = phi [0, vector.ph], [i+4, vector.body] + // v1 = phi [v_init, vector.ph], [v2, vector.body] + // v2 = a[i, i+1, i+2, i+3] + // b[i] = v2 - v1 + // // Next, third phase will introduce v1' = splice(v1(3), v2(0, 1, 2)) + // b[i, i+1, i+2, i+3] = v2 - v1 + // br cond, vector.body, middle.block + // + // middle.block: + // vector.recur.extract.for.phi = v2(2) + // vector.recur.extract = v2(3) + // br cond, scalar.ph, exit.block + // + // scalar.ph: + // scalar.recur.init = phi [vector.recur.extract, middle.block], + // [s.init, otherwise] + // br scalar.body + // + // scalar.body: + // i = phi [0, scalar.ph], [i+1, scalar.body] + // s1 = phi [scalar.recur.init, scalar.ph], [s2, scalar.body] + // s2 = a[i] + // b[i] = s2 - s1 + // br cond, scalar.body, exit.block + // + // exit.block: + // lo = lcssa.phi [s1, scalar.body], + // [vector.recur.extract.for.phi, middle.block] + // + // Now update VPIRInstructions modeling LCSSA phis in the exit block. + // Extract the penultimate value of the recurrence and use it as operand for + // the VPIRInstruction modeling the phi. + for (VPUser *U : FOR->users()) { + using namespace llvm::VPlanPatternMatch; + if (!match(U, m_ExtractLastElement(m_Specific(FOR)))) + continue; + // For VF vscale x 1, if vscale = 1, we are unable to extract the + // penultimate value of the recurrence. Instead we rely on the existing + // extract of the last element from the result of + // VPInstruction::FirstOrderRecurrenceSplice. + // TODO: Consider vscale_range info and UF. + if (LoopVectorizationPlanner::getDecisionAndClampRange(IsScalableOne, + Range)) + return; + VPValue *PenultimateElement = MiddleBuilder.createNaryOp( + VPInstruction::ExtractPenultimateElement, {FOR->getBackedgeValue()}, + {}, "vector.recur.extract.for.phi"); + cast<VPInstruction>(U)->replaceAllUsesWith(PenultimateElement); + } + } +} diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h index 2f00e51..5a8a2bb 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h @@ -363,6 +363,19 @@ struct VPlanTransforms { static void addBranchWeightToMiddleTerminator(VPlan &Plan, ElementCount VF, std::optional<unsigned> VScaleForTuning); + + /// Create resume phis in the scalar preheader for first-order recurrences, + /// reductions and inductions, and update the VPIRInstructions wrapping the + /// original phis in the scalar header. End values for inductions are added to + /// \p IVEndValues. + static void addScalarResumePhis(VPlan &Plan, VPRecipeBuilder &Builder, + DenseMap<VPValue *, VPValue *> &IVEndValues); + + /// Handle users in the exit block for first-order recurrences in the original + /// exit block. The penultimate value of recurrences is fed to their LCSSA phi + /// users in the original exit block using the VPIRInstruction wrapping the + /// LCSSA phi. 
+ static void addExitUsersForFirstOrderRecurrences(VPlan &Plan, VFRange &Range); }; } // namespace llvm diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/test_counters.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/test_counters.ll new file mode 100644 index 0000000..b178a56 --- /dev/null +++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/test_counters.ll @@ -0,0 +1,65 @@ +; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv-vulkan-library %s -o - | FileCheck %s +; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-vulkan-library %s -o - -filetype=obj | spirv-val --target-env vulkan1.3 %} + +; ModuleID = 'test_counters.hlsl' +source_filename = "test_counters.hlsl" + +; CHECK: OpCapability Int8 +; CHECK-DAG: OpName [[OutputBuffer:%[0-9]+]] "OutputBuffer" +; CHECK-DAG: OpName [[InputBuffer:%[0-9]+]] "InputBuffer" +; CHECK-DAG: OpName [[OutputBufferCounter:%[0-9]+]] "OutputBuffer.counter" +; CHECK-DAG: OpName [[InputBufferCounter:%[0-9]+]] "InputBuffer.counter" +; CHECK-DAG: OpDecorate [[OutputBuffer]] DescriptorSet 0 +; CHECK-DAG: OpDecorate [[OutputBuffer]] Binding 10 +; CHECK-DAG: OpDecorate [[OutputBufferCounter]] DescriptorSet 0 +; CHECK-DAG: OpDecorate [[OutputBufferCounter]] Binding 0 +; CHECK-DAG: OpDecorate [[InputBuffer]] DescriptorSet 0 +; CHECK-DAG: OpDecorate [[InputBuffer]] Binding 1 +; CHECK-DAG: OpDecorate [[InputBufferCounter]] DescriptorSet 0 +; CHECK-DAG: OpDecorate [[InputBufferCounter]] Binding 2 +; CHECK-DAG: [[int:%[0-9]+]] = OpTypeInt 32 0 +; CHECK-DAG: [[zero:%[0-9]+]] = OpConstant [[int]] 0{{$}} +; CHECK-DAG: [[one:%[0-9]+]] = OpConstant [[int]] 1{{$}} +; CHECK-DAG: [[minus_one:%[0-9]+]] = OpConstant [[int]] 4294967295 +; CHECK: [[OutputBufferHandle:%[0-9]+]] = OpCopyObject {{%[0-9]+}} [[OutputBuffer]] +; CHECK: [[InputBufferHandle:%[0-9]+]] = OpCopyObject {{%[0-9]+}} [[InputBuffer]] +; CHECK: [[InputCounterAC:%[0-9]+]] = OpAccessChain {{%[0-9]+}} [[InputBufferCounter]] [[zero]] +; CHECK: [[dec:%[0-9]+]] = OpAtomicIAdd [[int]] [[InputCounterAC]] [[one]] [[zero]] [[minus_one]] +; CHECK: [[iadd:%[0-9]+]] = OpIAdd [[int]] [[dec]] [[minus_one]] +; CHECK: [[OutputCounterAC:%[0-9]+]] = OpAccessChain {{%[0-9]+}} [[OutputBufferCounter]] [[zero]] +; CHECK: [[inc:%[0-9]+]] = OpAtomicIAdd [[int]] [[OutputCounterAC]] [[one]] [[zero]] [[one]] +; CHECK: [[InputAC:%[0-9]+]] = OpAccessChain {{%[0-9]+}} [[InputBufferHandle]] [[zero]] [[iadd]] +; CHECK: [[load:%[0-9]+]] = OpLoad {{%[0-9]+}} [[InputAC]] +; CHECK: [[OutputAC:%[0-9]+]] = OpAccessChain {{%[0-9]+}} [[OutputBufferHandle]] [[zero]] [[inc]] +; CHECK: OpStore [[OutputAC]] [[load]] + + +target triple = "spirv1.6-unknown-vulkan1.3-compute" + +@.str = private unnamed_addr constant [13 x i8] c"OutputBuffer\00" +@.str.2 = private unnamed_addr constant [12 x i8] c"InputBuffer\00" + +define void @main() #0 { +entry: + %0 = call target("spirv.VulkanBuffer", [0 x float], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0f32_12_1t(i32 0, i32 10, i32 1, i32 0, ptr @.str) + %1 = call target("spirv.VulkanBuffer", i32, 12, 1) @llvm.spv.resource.counterhandlefromimplicitbinding.tspirv.VulkanBuffer_i32_12_1t.tspirv.VulkanBuffer_a0f32_12_1t(target("spirv.VulkanBuffer", [0 x float], 12, 1) %0, i32 0, i32 0) + %2 = call target("spirv.VulkanBuffer", [0 x float], 12, 1) @llvm.spv.resource.handlefromimplicitbinding.tspirv.VulkanBuffer_a0f32_12_1t(i32 1, i32 0, i32 1, i32 0, ptr @.str.2) + %3 = call target("spirv.VulkanBuffer", i32, 12, 1) 
@llvm.spv.resource.counterhandlefromimplicitbinding.tspirv.VulkanBuffer_i32_12_1t.tspirv.VulkanBuffer_a0f32_12_1t(target("spirv.VulkanBuffer", [0 x float], 12, 1) %2, i32 2, i32 0) + %4 = call i32 @llvm.spv.resource.updatecounter.tspirv.VulkanBuffer_i32_12_1t(target("spirv.VulkanBuffer", i32, 12, 1) %3, i8 -1) + %5 = call i32 @llvm.spv.resource.updatecounter.tspirv.VulkanBuffer_i32_12_1t(target("spirv.VulkanBuffer", i32, 12, 1) %1, i8 1) + %6 = call ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0f32_12_1t(target("spirv.VulkanBuffer", [0 x float], 12, 1) %2, i32 %4) + %7 = load float, ptr addrspace(11) %6 + %8 = call ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0f32_12_1t(target("spirv.VulkanBuffer", [0 x float], 12, 1) %0, i32 %5) + store float %7, ptr addrspace(11) %8 + ret void +} + +declare target("spirv.VulkanBuffer", [0 x float], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0f32_12_1t(i32, i32, i32, i32, ptr) #1 +declare target("spirv.VulkanBuffer", i32, 12, 1) @llvm.spv.resource.counterhandlefromimplicitbinding.tspirv.VulkanBuffer_i32_12_1t.tspirv.VulkanBuffer_a0f32_12_1t(target("spirv.VulkanBuffer", [0 x float], 12, 1), i32, i32) #1 +declare target("spirv.VulkanBuffer", [0 x float], 12, 1) @llvm.spv.resource.handlefromimplicitbinding.tspirv.VulkanBuffer_a0f32_12_1t(i32, i32, i32, i32, ptr) #1 +declare i32 @llvm.spv.resource.updatecounter.tspirv.VulkanBuffer_i32_12_1t(target("spirv.VulkanBuffer", i32, 12, 1), i8) #2 +declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0f32_12_1t(target("spirv.VulkanBuffer", [0 x float], 12, 1), i32) #1 + +attributes #0 = { "hlsl.shader"="compute" "hlsl.numthreads"="1,1,1" } +attributes #1 = { memory(none) } +attributes #2 = { memory(argmem: readwrite, inaccessiblemem: readwrite) } diff --git a/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll b/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll index 3349d31..b2064b1 100644 --- a/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll +++ b/llvm/test/CodeGen/X86/x86-shrink-wrap-unwind.ll @@ -317,13 +317,13 @@ define void @with_nounwind(i1 %cond) nounwind personality ptr @my_personality { ; CHECK-NEXT: popq %rax ; CHECK-NEXT: retq ; CHECK-NEXT: LBB4_1: ## %throw -; CHECK-NEXT: Ltmp0: +; CHECK-NEXT: Ltmp0: ## EH_LABEL ; CHECK-NEXT: callq _throw_exception -; CHECK-NEXT: Ltmp1: +; CHECK-NEXT: Ltmp1: ## EH_LABEL ; CHECK-NEXT: ## %bb.2: ## %unreachable ; CHECK-NEXT: ud2 ; CHECK-NEXT: LBB4_3: ## %landing -; CHECK-NEXT: Ltmp2: +; CHECK-NEXT: Ltmp2: ## EH_LABEL ; CHECK-NEXT: popq %rax ; CHECK-NEXT: retq ; CHECK-NEXT: Lfunc_end0: @@ -340,12 +340,12 @@ define void @with_nounwind(i1 %cond) nounwind personality ptr @my_personality { ; NOCOMPACTUNWIND-NEXT: retq ; NOCOMPACTUNWIND-NEXT: .LBB4_1: # %throw ; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 16 -; NOCOMPACTUNWIND-NEXT: .Ltmp0: +; NOCOMPACTUNWIND-NEXT: .Ltmp0: # EH_LABEL ; NOCOMPACTUNWIND-NEXT: callq throw_exception@PLT -; NOCOMPACTUNWIND-NEXT: .Ltmp1: +; NOCOMPACTUNWIND-NEXT: .Ltmp1: # EH_LABEL ; NOCOMPACTUNWIND-NEXT: # %bb.2: # %unreachable ; NOCOMPACTUNWIND-NEXT: .LBB4_3: # %landing -; NOCOMPACTUNWIND-NEXT: .Ltmp2: +; NOCOMPACTUNWIND-NEXT: .Ltmp2: # EH_LABEL ; NOCOMPACTUNWIND-NEXT: popq %rax ; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 8 ; NOCOMPACTUNWIND-NEXT: retq @@ -379,9 +379,9 @@ define void @with_nounwind_same_succ(i1 %cond) nounwind personality ptr @my_pers ; CHECK-NEXT: ## %bb.1: ## %throw ; CHECK-NEXT: pushq %rax ; CHECK-NEXT: .cfi_def_cfa_offset 16 -; 
CHECK-NEXT: Ltmp3: +; CHECK-NEXT: Ltmp3: ## EH_LABEL ; CHECK-NEXT: callq _throw_exception -; CHECK-NEXT: Ltmp4: +; CHECK-NEXT: Ltmp4: ## EH_LABEL ; CHECK-NEXT: LBB5_3: ## %fallthrough ; CHECK-NEXT: ## InlineAsm Start ; CHECK-NEXT: nop @@ -390,7 +390,7 @@ define void @with_nounwind_same_succ(i1 %cond) nounwind personality ptr @my_pers ; CHECK-NEXT: LBB5_4: ## %return ; CHECK-NEXT: retq ; CHECK-NEXT: LBB5_2: ## %landing -; CHECK-NEXT: Ltmp5: +; CHECK-NEXT: Ltmp5: ## EH_LABEL ; CHECK-NEXT: jmp LBB5_3 ; CHECK-NEXT: Lfunc_end1: ; @@ -401,9 +401,9 @@ define void @with_nounwind_same_succ(i1 %cond) nounwind personality ptr @my_pers ; NOCOMPACTUNWIND-NEXT: # %bb.1: # %throw ; NOCOMPACTUNWIND-NEXT: pushq %rax ; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 16 -; NOCOMPACTUNWIND-NEXT: .Ltmp3: +; NOCOMPACTUNWIND-NEXT: .Ltmp3: # EH_LABEL ; NOCOMPACTUNWIND-NEXT: callq throw_exception@PLT -; NOCOMPACTUNWIND-NEXT: .Ltmp4: +; NOCOMPACTUNWIND-NEXT: .Ltmp4: # EH_LABEL ; NOCOMPACTUNWIND-NEXT: .LBB5_3: # %fallthrough ; NOCOMPACTUNWIND-NEXT: #APP ; NOCOMPACTUNWIND-NEXT: nop @@ -414,7 +414,7 @@ define void @with_nounwind_same_succ(i1 %cond) nounwind personality ptr @my_pers ; NOCOMPACTUNWIND-NEXT: retq ; NOCOMPACTUNWIND-NEXT: .LBB5_2: # %landing ; NOCOMPACTUNWIND-NEXT: .cfi_def_cfa_offset 16 -; NOCOMPACTUNWIND-NEXT: .Ltmp5: +; NOCOMPACTUNWIND-NEXT: .Ltmp5: # EH_LABEL ; NOCOMPACTUNWIND-NEXT: jmp .LBB5_3 entry: br i1 %cond, label %throw, label %return diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-indices.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-indices.ll index 655db54..a079203 100644 --- a/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-indices.ll +++ b/llvm/test/Transforms/SLPVectorizer/RISCV/strided-loads-with-external-indices.ll @@ -10,14 +10,10 @@ define void @test() { ; CHECK-NEXT: [[SUB4_I_I65_US:%.*]] = or i64 0, 1 ; CHECK-NEXT: br label [[BODY:%.*]] ; CHECK: body: -; CHECK-NEXT: [[ADD_I_I62_US:%.*]] = shl i64 0, 0 -; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> <i64 poison, i64 1>, i64 [[ADD_I_I62_US]], i32 0 -; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i64> zeroinitializer, [[TMP0]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [[CLASS_A:%.*]], <2 x ptr> zeroinitializer, <2 x i64> [[TMP1]] -; CHECK-NEXT: [[TMP3:%.*]] = call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> [[TMP2]], i32 4, <2 x i1> splat (i1 true), <2 x i32> poison) -; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i32> [[TMP3]], i32 0 -; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i32> [[TMP3]], i32 1 -; CHECK-NEXT: [[CMP_I_I_I_I67_US:%.*]] = icmp slt i32 [[TMP4]], [[TMP5]] +; CHECK-NEXT: [[TMP0:%.*]] = call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> getelementptr ([[CLASS_A:%.*]], <2 x ptr> zeroinitializer, <2 x i64> <i64 0, i64 1>), i32 4, <2 x i1> splat (i1 true), <2 x i32> poison) +; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x i32> [[TMP0]], i32 0 +; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i32> [[TMP0]], i32 1 +; CHECK-NEXT: [[CMP_I_I_I_I67_US:%.*]] = icmp slt i32 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[SPEC_SELECT_I_I68_US:%.*]] = select i1 false, i64 [[SUB4_I_I65_US]], i64 0 ; CHECK-NEXT: br label [[BODY]] ; diff --git a/llvm/test/Transforms/SLPVectorizer/X86/ext-used-scalar-different-bitwidth.ll b/llvm/test/Transforms/SLPVectorizer/X86/ext-used-scalar-different-bitwidth.ll index 7758596..87f2cca 100644 --- a/llvm/test/Transforms/SLPVectorizer/X86/ext-used-scalar-different-bitwidth.ll +++ 
b/llvm/test/Transforms/SLPVectorizer/X86/ext-used-scalar-different-bitwidth.ll @@ -8,8 +8,8 @@ define i32 @test() { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: store i32 152, ptr @f, align 4 ; CHECK-NEXT: [[AGG_TMP_SROA_0_0_COPYLOAD_I:%.*]] = load i32, ptr @f, align 4 -; CHECK-NEXT: [[ADD_I_I:%.*]] = shl i32 [[AGG_TMP_SROA_0_0_COPYLOAD_I]], 24 -; CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i32> <i32 poison, i32 83886080, i32 83886080, i32 83886080, i32 83886080, i32 83886080, i32 83886080, i32 83886080>, i32 [[ADD_I_I]], i32 0 +; CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x i32> <i32 poison, i32 83886080, i32 83886080, i32 83886080, i32 83886080, i32 83886080, i32 83886080, i32 83886080>, i32 [[AGG_TMP_SROA_0_0_COPYLOAD_I]], i32 0 +; CHECK-NEXT: [[TMP0:%.*]] = shl <8 x i32> [[TMP3]], <i32 24, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0> ; CHECK-NEXT: [[TMP1:%.*]] = add <8 x i32> <i32 83886080, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, [[TMP0]] ; CHECK-NEXT: [[TMP2:%.*]] = ashr <8 x i32> [[TMP1]], splat (i32 24) ; CHECK-NEXT: [[TMP5:%.*]] = and <8 x i32> [[TMP2]], <i32 66440127, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> diff --git a/llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll b/llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll index 75aec45..3e0a374 100644 --- a/llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/vect_copyable_in_binops.ll @@ -247,32 +247,12 @@ entry: } define void @shl0(ptr noalias %dst, ptr noalias %src) { -; NON-POW2-LABEL: @shl0( -; NON-POW2-NEXT: entry: -; NON-POW2-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 1 -; NON-POW2-NEXT: [[TMP0:%.*]] = load i32, ptr [[SRC]], align 4 -; NON-POW2-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 1 -; NON-POW2-NEXT: store i32 [[TMP0]], ptr [[DST]], align 4 -; NON-POW2-NEXT: [[TMP1:%.*]] = load <3 x i32>, ptr [[INCDEC_PTR]], align 4 -; NON-POW2-NEXT: [[TMP2:%.*]] = shl <3 x i32> [[TMP1]], <i32 1, i32 2, i32 3> -; NON-POW2-NEXT: store <3 x i32> [[TMP2]], ptr [[INCDEC_PTR1]], align 4 -; NON-POW2-NEXT: ret void -; -; POW2-ONLY-LABEL: @shl0( -; POW2-ONLY-NEXT: entry: -; POW2-ONLY-NEXT: [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i64 1 -; POW2-ONLY-NEXT: [[TMP0:%.*]] = load i32, ptr [[SRC]], align 4 -; POW2-ONLY-NEXT: [[INCDEC_PTR1:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 1 -; POW2-ONLY-NEXT: store i32 [[TMP0]], ptr [[DST]], align 4 -; POW2-ONLY-NEXT: [[INCDEC_PTR4:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 3 -; POW2-ONLY-NEXT: [[INCDEC_PTR6:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 3 -; POW2-ONLY-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr [[INCDEC_PTR]], align 4 -; POW2-ONLY-NEXT: [[TMP2:%.*]] = shl <2 x i32> [[TMP1]], <i32 1, i32 2> -; POW2-ONLY-NEXT: store <2 x i32> [[TMP2]], ptr [[INCDEC_PTR1]], align 4 -; POW2-ONLY-NEXT: [[TMP3:%.*]] = load i32, ptr [[INCDEC_PTR4]], align 4 -; POW2-ONLY-NEXT: [[SHL8:%.*]] = shl i32 [[TMP3]], 3 -; POW2-ONLY-NEXT: store i32 [[SHL8]], ptr [[INCDEC_PTR6]], align 4 -; POW2-ONLY-NEXT: ret void +; CHECK-LABEL: @shl0( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr [[SRC:%.*]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = shl <4 x i32> [[TMP0]], <i32 0, i32 1, i32 2, i32 3> +; CHECK-NEXT: store <4 x i32> [[TMP1]], ptr [[DST:%.*]], align 4 +; CHECK-NEXT: ret void ; entry: %incdec.ptr = getelementptr inbounds i32, ptr %src, i64 1 diff --git 
a/llvm/test/Transforms/SLPVectorizer/bool-logical-op-reduction-with-poison.ll b/llvm/test/Transforms/SLPVectorizer/bool-logical-op-reduction-with-poison.ll index a5b1e9b..769b360 100644 --- a/llvm/test/Transforms/SLPVectorizer/bool-logical-op-reduction-with-poison.ll +++ b/llvm/test/Transforms/SLPVectorizer/bool-logical-op-reduction-with-poison.ll @@ -1,25 +1,44 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3 -; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s %} -; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer < %s -mtriple=aarch64-unknown-linux-gnu | FileCheck %s %} +; RUN: %if x86-registered-target %{ opt -S --passes=slp-vectorizer < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=X86 %} +; RUN: %if aarch64-registered-target %{ opt -S --passes=slp-vectorizer < %s -mtriple=aarch64-unknown-linux-gnu | FileCheck %s --check-prefix=AARCH64 %} define i1 @test(i32 %0, i32 %1, i32 %p) { -; CHECK-LABEL: define i1 @test( -; CHECK-SAME: i32 [[TMP0:%.*]], i32 [[TMP1:%.*]], i32 [[P:%.*]]) { -; CHECK-NEXT: entry: -; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP0]], 0 -; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> poison, i32 [[TMP1]], i32 0 -; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> poison, <4 x i32> zeroinitializer -; CHECK-NEXT: [[TMP4:%.*]] = shl <4 x i32> zeroinitializer, [[TMP3]] -; CHECK-NEXT: [[TMP5:%.*]] = icmp slt <4 x i32> [[TMP4]], zeroinitializer -; CHECK-NEXT: [[CMP6:%.*]] = icmp slt i32 0, [[P]] -; CHECK-NEXT: [[TMP6:%.*]] = freeze <4 x i1> [[TMP5]] -; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP6]]) -; CHECK-NEXT: [[OP_RDX:%.*]] = select i1 [[TMP7]], i1 true, i1 [[CMP6]] -; CHECK-NEXT: [[OP_RDX1:%.*]] = select i1 [[CMP1]], i1 true, i1 [[CMP1]] -; CHECK-NEXT: [[TMP8:%.*]] = freeze i1 [[OP_RDX]] -; CHECK-NEXT: [[OP_RDX2:%.*]] = select i1 [[TMP8]], i1 true, i1 [[OP_RDX1]] -; CHECK-NEXT: ret i1 [[OP_RDX2]] +; X86-LABEL: define i1 @test( +; X86-SAME: i32 [[TMP0:%.*]], i32 [[TMP1:%.*]], i32 [[P:%.*]]) { +; X86-NEXT: entry: +; X86-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP0]], 0 +; X86-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> poison, i32 [[TMP1]], i32 0 +; X86-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> poison, <4 x i32> zeroinitializer +; X86-NEXT: [[TMP4:%.*]] = shl <4 x i32> zeroinitializer, [[TMP3]] +; X86-NEXT: [[TMP5:%.*]] = icmp slt <4 x i32> [[TMP4]], zeroinitializer +; X86-NEXT: [[CMP6:%.*]] = icmp slt i32 0, [[P]] +; X86-NEXT: [[TMP6:%.*]] = freeze <4 x i1> [[TMP5]] +; X86-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP6]]) +; X86-NEXT: [[OP_RDX:%.*]] = select i1 [[TMP7]], i1 true, i1 [[CMP6]] +; X86-NEXT: [[OP_RDX1:%.*]] = select i1 [[CMP1]], i1 true, i1 [[CMP1]] +; X86-NEXT: [[TMP8:%.*]] = freeze i1 [[OP_RDX]] +; X86-NEXT: [[OP_RDX2:%.*]] = select i1 [[TMP8]], i1 true, i1 [[OP_RDX1]] +; X86-NEXT: ret i1 [[OP_RDX2]] +; +; AARCH64-LABEL: define i1 @test( +; AARCH64-SAME: i32 [[TMP0:%.*]], i32 [[TMP1:%.*]], i32 [[P:%.*]]) { +; AARCH64-NEXT: entry: +; AARCH64-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP0]], 0 +; AARCH64-NEXT: [[SHL4:%.*]] = shl i32 0, [[TMP1]] +; AARCH64-NEXT: [[CMP5:%.*]] = icmp slt i32 [[SHL4]], 0 +; AARCH64-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> <i32 0, i32 poison, i32 poison, i32 poison>, i32 [[TMP1]], i32 1 +; AARCH64-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> 
poison, <4 x i32> <i32 0, i32 1, i32 1, i32 1> +; AARCH64-NEXT: [[TMP4:%.*]] = shl <4 x i32> zeroinitializer, [[TMP3]] +; AARCH64-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> <i32 poison, i32 0, i32 0, i32 0>, i32 [[P]], i32 0 +; AARCH64-NEXT: [[TMP6:%.*]] = icmp slt <4 x i32> [[TMP4]], [[TMP5]] +; AARCH64-NEXT: [[TMP7:%.*]] = freeze <4 x i1> [[TMP6]] +; AARCH64-NEXT: [[TMP8:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP7]]) +; AARCH64-NEXT: [[OP_RDX:%.*]] = select i1 [[TMP8]], i1 true, i1 [[CMP5]] +; AARCH64-NEXT: [[OP_RDX1:%.*]] = select i1 [[CMP1]], i1 true, i1 [[CMP1]] +; AARCH64-NEXT: [[TMP9:%.*]] = freeze i1 [[OP_RDX]] +; AARCH64-NEXT: [[OP_RDX2:%.*]] = select i1 [[TMP9]], i1 true, i1 [[OP_RDX1]] +; AARCH64-NEXT: ret i1 [[OP_RDX2]] ; entry: %cmp1 = icmp sgt i32 %0, 0 diff --git a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp index 9b61540..50fca56 100644 --- a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp +++ b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp @@ -1118,10 +1118,7 @@ StringRef getTypeMangling(Type type, bool isSigned) { llvm_unreachable("Unsupported integer width"); } }) - .Default([](auto) { - llvm_unreachable("No mangling defined"); - return ""; - }); + .DefaultUnreachable("No mangling defined"); } template <typename ReduceOp> diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp index c8efdf0..24c33f9 100644 --- a/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp +++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp @@ -987,7 +987,7 @@ void SPIRVDialect::printType(Type type, DialectAsmPrinter &os) const { .Case<ArrayType, CooperativeMatrixType, PointerType, RuntimeArrayType, ImageType, SampledImageType, StructType, MatrixType, TensorArmType>( [&](auto type) { print(type, os); }) - .Default([](Type) { llvm_unreachable("unhandled SPIR-V type"); }); + .DefaultUnreachable("Unhandled SPIR-V type"); } //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVTypes.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVTypes.cpp index 7e9a80e..f895807 100644 --- a/mlir/lib/Dialect/SPIRV/IR/SPIRVTypes.cpp +++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVTypes.cpp @@ -57,7 +57,7 @@ public: for (Type elementType : concreteType.getElementTypes()) add(elementType); }) - .Default([](SPIRVType) { llvm_unreachable("Unhandled type"); }); + .DefaultUnreachable("Unhandled type"); } void add(Type type) { add(cast<SPIRVType>(type)); } @@ -107,7 +107,7 @@ public: for (Type elementType : concreteType.getElementTypes()) add(elementType); }) - .Default([](SPIRVType) { llvm_unreachable("Unhandled type"); }); + .DefaultUnreachable("Unhandled type"); } void add(Type type) { add(cast<SPIRVType>(type)); } @@ -198,8 +198,7 @@ Type CompositeType::getElementType(unsigned index) const { .Case<MatrixType>([](MatrixType type) { return type.getColumnType(); }) .Case<StructType>( [index](StructType type) { return type.getElementType(index); }) - .Default( - [](Type) -> Type { llvm_unreachable("invalid composite type"); }); + .DefaultUnreachable("Invalid composite type"); } unsigned CompositeType::getNumElements() const { @@ -207,9 +206,7 @@ unsigned CompositeType::getNumElements() const { .Case<ArrayType, StructType, TensorArmType, VectorType>( [](auto type) { return type.getNumElements(); }) .Case<MatrixType>([](MatrixType type) { return type.getNumColumns(); }) - .Default([](SPIRVType) -> unsigned { - llvm_unreachable("Invalid type for number of elements query"); - 
}); + .DefaultUnreachable("Invalid type for number of elements query"); } bool CompositeType::hasCompileTimeKnownNumElements() const { diff --git a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp index 122f61e0..88e1ab6 100644 --- a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp +++ b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp @@ -622,7 +622,7 @@ static spirv::Dim convertRank(int64_t rank) { } static spirv::ImageFormat getImageFormat(Type elementType) { - return llvm::TypeSwitch<Type, spirv::ImageFormat>(elementType) + return TypeSwitch<Type, spirv::ImageFormat>(elementType) .Case<Float16Type>([](Float16Type) { return spirv::ImageFormat::R16f; }) .Case<Float32Type>([](Float32Type) { return spirv::ImageFormat::R32f; }) .Case<IntegerType>([](IntegerType intType) { @@ -639,11 +639,7 @@ static spirv::ImageFormat getImageFormat(Type elementType) { llvm_unreachable("Unhandled integer type!"); } }) - .Default([](Type) { - llvm_unreachable("Unhandled element type!"); - // We need to return something here to satisfy the type switch. - return spirv::ImageFormat::R32f; - }); + .DefaultUnreachable("Unhandled element type!"); #undef BIT_WIDTH_CASE } diff --git a/openmp/runtime/test/transform/tile/intfor.F90 b/openmp/runtime/test/transform/tile/do.F90 index 4ca9f14..74aa54b 100644 --- a/openmp/runtime/test/transform/tile/intfor.F90 +++ b/openmp/runtime/test/transform/tile/do.F90 @@ -2,14 +2,14 @@ ! It is done 3 times corresponding to every possible fraction of the last ! iteration before passing beyond UB. -! RUN: %flang %flags %openmp_flags -fopenmp-version=51 -DUB=16 %s -o %t-ub16.exe -! RUN: %flang %flags %openmp_flags -fopenmp-version=51 -DUB=17 %s -o %t-ub17.exe -! RUN: %flang %flags %openmp_flags -fopenmp-version=51 -DUB=18 %s -o %t-ub18.exe +! RUN: %flang %flags %openmp_flags -fopenmp-version=51 -cpp -DUB=16 %s -o %t-ub16.exe +! RUN: %flang %flags %openmp_flags -fopenmp-version=51 -cpp -DUB=17 %s -o %t-ub17.exe +! RUN: %flang %flags %openmp_flags -fopenmp-version=51 -cpp -DUB=18 %s -o %t-ub18.exe ! RUN: %t-ub16.exe | FileCheck %s --match-full-lines ! RUN: %t-ub17.exe | FileCheck %s --match-full-lines ! RUN: %t-ub18.exe | FileCheck %s --match-full-lines -program tile_intfor_1d +program tile_do_1d implicit none integer i print *, 'do' diff --git a/openmp/runtime/test/transform/tile/intfor_2d.f90 b/openmp/runtime/test/transform/tile/do_2d.f90 index 6bc90c7..162bed0 100644 --- a/openmp/runtime/test/transform/tile/intfor_2d.f90 +++ b/openmp/runtime/test/transform/tile/do_2d.f90 @@ -4,7 +4,8 @@ ! RUN: %t.exe | FileCheck %s --match-full-lines -program tile_intfor_2d +program tile_do_2d + implicit none integer i, j print *, 'do' diff --git a/openmp/runtime/test/transform/tile/intfor_2d_varsizes.F90 b/openmp/runtime/test/transform/tile/do_2d_varsizes.f90 index 4cb5adf..7d60ad0 100644 --- a/openmp/runtime/test/transform/tile/intfor_2d_varsizes.F90 +++ b/openmp/runtime/test/transform/tile/do_2d_varsizes.f90 @@ -3,7 +3,8 @@ ! RUN: %flang %flags %openmp_flags -fopenmp-version=51 %s -o %t.exe ! 
RUN: %t.exe | FileCheck %s --match-full-lines -program tile_intfor_varsizes +program tile_do_2d_varsizes + implicit none integer i call kernel(7,17,3,2) diff --git a/openmp/runtime/test/transform/unroll/heuristic_intdo.f90 b/openmp/runtime/test/transform/unroll/heuristic_do.f90 index d0ef938..c646e52 100644 --- a/openmp/runtime/test/transform/unroll/heuristic_intdo.f90 +++ b/openmp/runtime/test/transform/unroll/heuristic_do.f90 @@ -4,7 +4,8 @@ ! RUN: %t.exe | FileCheck %s --match-full-lines -program unroll_heuristic +program unroll_heuristic_do + implicit none integer :: i print *, 'do' diff --git a/openmp/tools/omptest/src/OmptTester.cpp b/openmp/tools/omptest/src/OmptTester.cpp index afa96ac..1a83f62 100644 --- a/openmp/tools/omptest/src/OmptTester.cpp +++ b/openmp/tools/omptest/src/OmptTester.cpp @@ -390,7 +390,7 @@ int ompt_initialize(ompt_function_lookup_t lookup, int initial_device_num, register_ompt_callback(ompt_callback_parallel_begin); register_ompt_callback(ompt_callback_parallel_end); register_ompt_callback(ompt_callback_work); - // register_ompt_callback(ompt_callback_dispatch); + register_ompt_callback(ompt_callback_dispatch); register_ompt_callback(ompt_callback_task_create); // register_ompt_callback(ompt_callback_dependences); // register_ompt_callback(ompt_callback_task_dependence); diff --git a/orc-rt/include/orc-rt/SPSWrapperFunction.h b/orc-rt/include/orc-rt/SPSWrapperFunction.h index 3ed3295..e5ed14f 100644 --- a/orc-rt/include/orc-rt/SPSWrapperFunction.h +++ b/orc-rt/include/orc-rt/SPSWrapperFunction.h @@ -33,47 +33,61 @@ private: return std::move(R); } - template <typename T> static const T &toSerializable(const T &Arg) noexcept { - return Arg; - } + template <typename T> struct Serializable { + typedef std::decay_t<T> serializable_type; + static const T &to(const T &Arg) noexcept { return Arg; } + static T &&from(T &&Arg) noexcept { return std::forward<T>(Arg); } + }; - static SPSSerializableError toSerializable(Error Err) noexcept { - return SPSSerializableError(std::move(Err)); - } + template <typename T> struct Serializable<T *> { + typedef ExecutorAddr serializable_type; + static ExecutorAddr to(T *Arg) { return ExecutorAddr::fromPtr(Arg); } + static T *from(ExecutorAddr A) { return A.toPtr<T *>(); } + }; - template <typename T> - static SPSSerializableExpected<T> toSerializable(Expected<T> Arg) noexcept { - return SPSSerializableExpected<T>(std::move(Arg)); - } + template <> struct Serializable<Error> { + typedef SPSSerializableError serializable_type; + static SPSSerializableError to(Error Err) { + return SPSSerializableError(std::move(Err)); + } + static Error from(SPSSerializableError Err) { return Err.toError(); } + }; + + template <typename T> struct Serializable<Expected<T>> { + typedef SPSSerializableExpected<T> serializable_type; + static SPSSerializableExpected<T> to(Expected<T> Val) { + return SPSSerializableExpected<T>(std::move(Val)); + } + static Expected<T> from(SPSSerializableExpected<T> Val) { + return Val.toExpected(); + } + }; template <typename... Ts> struct DeserializableTuple; template <typename... Ts> struct DeserializableTuple<std::tuple<Ts...>> { - typedef std::tuple< - std::decay_t<decltype(toSerializable(std::declval<Ts>()))>...> - type; + typedef std::tuple<typename Serializable<Ts>::serializable_type...> type; }; template <typename... 
Ts> using DeserializableTuple_t = typename DeserializableTuple<Ts...>::type; - template <typename T> static T &&fromSerializable(T &&Arg) noexcept { - return std::forward<T>(Arg); - } - - static Error fromSerializable(SPSSerializableError Err) noexcept { - return Err.toError(); - } - - template <typename T> - static Expected<T> fromSerializable(SPSSerializableExpected<T> Val) noexcept { - return Val.toExpected(); + template <typename ArgTuple, typename... SerializableArgs, std::size_t... Is> + std::optional<ArgTuple> + applySerializationConversions(std::tuple<SerializableArgs...> &Inputs, + std::index_sequence<Is...>) { + static_assert(sizeof...(SerializableArgs) == + std::index_sequence<Is...>::size(), + "Tuple sizes don't match"); + return std::optional<ArgTuple>( + std::in_place, Serializable<std::tuple_element_t<Is, ArgTuple>>::from( + std::move(std::get<Is>(Inputs)))...); } public: template <typename... ArgTs> std::optional<WrapperFunctionBuffer> serialize(ArgTs &&...Args) { - return serializeImpl(toSerializable(std::forward<ArgTs>(Args))...); + return serializeImpl(Serializable<ArgTs>::to(std::forward<ArgTs>(Args))...); } template <typename ArgTuple> @@ -85,12 +99,8 @@ public: if (!SPSSerializationTraits<SPSTuple<SPSArgTs...>, decltype(Args)>::deserialize(IB, Args)) return std::nullopt; - return std::apply( - [](auto &&...A) { - return std::optional<ArgTuple>(std::in_place, - std::move(fromSerializable(A))...); - }, - std::move(Args)); + return applySerializationConversions<ArgTuple>( + Args, std::make_index_sequence<std::tuple_size_v<ArgTuple>>()); } }; diff --git a/orc-rt/unittests/SPSWrapperFunctionTest.cpp b/orc-rt/unittests/SPSWrapperFunctionTest.cpp index e010e2a..7f88ce0 100644 --- a/orc-rt/unittests/SPSWrapperFunctionTest.cpp +++ b/orc-rt/unittests/SPSWrapperFunctionTest.cpp @@ -82,7 +82,7 @@ static void void_noop_sps_wrapper(orc_rt_SessionRef Session, void *CallCtx, [](move_only_function<void()> Return) { Return(); }); } -TEST(SPSWrapperFunctionUtilsTest, TestVoidNoop) { +TEST(SPSWrapperFunctionUtilsTest, VoidNoop) { bool Ran = false; SPSWrapperFunction<void()>::call(DirectCaller(nullptr, void_noop_sps_wrapper), [&](Error Err) { @@ -102,7 +102,7 @@ static void add_via_lambda_sps_wrapper(orc_rt_SessionRef Session, void *CallCtx, }); } -TEST(SPSWrapperFunctionUtilsTest, TestBinaryOpViaLambda) { +TEST(SPSWrapperFunctionUtilsTest, BinaryOpViaLambda) { int32_t Result = 0; SPSWrapperFunction<int32_t(int32_t, int32_t)>::call( DirectCaller(nullptr, add_via_lambda_sps_wrapper), @@ -123,7 +123,7 @@ add_via_function_sps_wrapper(orc_rt_SessionRef Session, void *CallCtx, Session, CallCtx, Return, ArgBytes, add_via_function); } -TEST(SPSWrapperFunctionUtilsTest, TestBinaryOpViaFunction) { +TEST(SPSWrapperFunctionUtilsTest, BinaryOpViaFunction) { int32_t Result = 0; SPSWrapperFunction<int32_t(int32_t, int32_t)>::call( DirectCaller(nullptr, add_via_function_sps_wrapper), @@ -139,7 +139,7 @@ add_via_function_pointer_sps_wrapper(orc_rt_SessionRef Session, void *CallCtx, Session, CallCtx, Return, ArgBytes, &add_via_function); } -TEST(SPSWrapperFunctionUtilsTest, TestBinaryOpViaFunctionPointer) { +TEST(SPSWrapperFunctionUtilsTest, BinaryOpViaFunctionPointer) { int32_t Result = 0; SPSWrapperFunction<int32_t(int32_t, int32_t)>::call( DirectCaller(nullptr, add_via_function_pointer_sps_wrapper), @@ -161,7 +161,7 @@ static void improbable_feat_sps_wrapper(orc_rt_SessionRef Session, }); } -TEST(SPSWrapperFunctionUtilsTest, TestFunctionReturningErrorSuccessCase) { +TEST(SPSWrapperFunctionUtilsTest, 
TransparentConversionErrorSuccessCase) {
   bool DidRun = false;
   SPSWrapperFunction<SPSError(bool)>::call(
       DirectCaller(nullptr, improbable_feat_sps_wrapper),
@@ -174,7 +174,7 @@ TEST(SPSWrapperFunctionUtilsTest, TestFunctionReturningErrorSuccessCase)
   EXPECT_TRUE(DidRun);
 }
 
-TEST(SPSWrapperFunctionUtilsTest, TestFunctionReturningErrorFailureCase) {
+TEST(SPSWrapperFunctionUtilsTest, TransparentConversionErrorFailureCase) {
   std::string ErrMsg;
   SPSWrapperFunction<SPSError(bool)>::call(
       DirectCaller(nullptr, improbable_feat_sps_wrapper),
@@ -197,7 +197,7 @@ static void halve_number_sps_wrapper(orc_rt_SessionRef Session, void *CallCtx,
   });
 }
 
-TEST(SPSWrapperFunctionUtilsTest, TestFunctionReturningExpectedSuccessCase) {
+TEST(SPSWrapperFunctionUtilsTest, TransparentConversionExpectedSuccessCase) {
   int32_t Result = 0;
   SPSWrapperFunction<SPSExpected<int32_t>(int32_t)>::call(
       DirectCaller(nullptr, halve_number_sps_wrapper),
@@ -209,7 +209,7 @@ TEST(SPSWrapperFunctionUtilsTest, TestFunctionReturningExpectedSuccessCase) {
   EXPECT_EQ(Result, 1);
 }
 
-TEST(SPSWrapperFunctionUtilsTest, TestFunctionReturningExpectedFailureCase) {
+TEST(SPSWrapperFunctionUtilsTest, TransparentConversionExpectedFailureCase) {
   std::string ErrMsg;
   SPSWrapperFunction<SPSExpected<int32_t>(int32_t)>::call(
       DirectCaller(nullptr, halve_number_sps_wrapper),
@@ -221,6 +221,27 @@ TEST(SPSWrapperFunctionUtilsTest, TestFunctionReturningExpectedFailureCase) {
   EXPECT_EQ(ErrMsg, "N is not a multiple of 2");
 }
 
+static void
+round_trip_int_pointer_sps_wrapper(orc_rt_SessionRef Session, void *CallCtx,
+                                   orc_rt_WrapperFunctionReturn Return,
+                                   orc_rt_WrapperFunctionBuffer ArgBytes) {
+  SPSWrapperFunction<SPSExecutorAddr(SPSExecutorAddr)>::handle(
+      Session, CallCtx, Return, ArgBytes,
+      [](move_only_function<void(int32_t *)> Return, int32_t *P) {
+        Return(P);
+      });
+}
+
+TEST(SPSWrapperFunctionUtilsTest, TransparentSerializationPointers) {
+  int32_t X = 42;
+  int32_t *P = nullptr;
+  SPSWrapperFunction<SPSExecutorAddr(SPSExecutorAddr)>::call(
+      DirectCaller(nullptr, round_trip_int_pointer_sps_wrapper),
+      [&](Expected<int32_t *> R) { P = cantFail(std::move(R)); }, &X);
+
+  EXPECT_EQ(P, &X);
+}
+
 template <size_t N> struct SPSOpCounter {};
 
 namespace orc_rt {
@@ -249,7 +270,7 @@ handle_with_reference_types_sps_wrapper(orc_rt_SessionRef Session,
                                         OpCounter<3> &&) { Return(); });
 }
 
-TEST(SPSWrapperFunctionUtilsTest, TestHandlerWithReferences) {
+TEST(SPSWrapperFunctionUtilsTest, HandlerWithReferences) {
   // Test that we can handle by-value, by-ref, by-const-ref, and by-rvalue-ref
   // arguments, and that we generate the expected number of moves.
   OpCounter<0>::reset();
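A note on the SPSWrapperFunction.h change above: arguments and results now
route through a single Serializable<T> trait whose specializations name a wire
type and provide to()/from() conversions, so user-facing signatures can use
rich types (T *, Error, Expected<T>) while serialization only ever sees
wire-friendly ones. The following is a minimal standalone sketch of that
pattern, not the library implementation; Addr is a simplified stand-in for
orc-rt's ExecutorAddr, and everything else is invented for illustration.

#include <cstdint>
#include <iostream>

// Stand-in for orc_rt::ExecutorAddr: a pointer carried as a uint64_t.
struct Addr {
  uint64_t Value = 0;
  template <typename T> static Addr fromPtr(T *P) {
    return {reinterpret_cast<uint64_t>(P)};
  }
  template <typename T> T toPtr() const {
    return reinterpret_cast<T>(static_cast<uintptr_t>(Value));
  }
};

// Primary template: types pass through unchanged.
template <typename T> struct Serializable {
  using serializable_type = T;
  static const T &to(const T &V) { return V; }
  static T from(T V) { return V; }
};

// Pointers travel as Addr on the wire.
template <typename T> struct Serializable<T *> {
  using serializable_type = Addr;
  static Addr to(T *P) { return Addr::fromPtr(P); }
  static T *from(Addr A) { return A.toPtr<T *>(); }
};

int main() {
  int X = 42;
  // Outbound: convert the pointer to its wire representation...
  Addr Wire = Serializable<int *>::to(&X);
  // ...inbound: recover the pointer on the other side.
  int *P = Serializable<int *>::from(Wire);
  std::cout << (P == &X ? "round-tripped" : "mismatch") << "\n";
}

This mirrors what the TransparentSerializationPointers test exercises: the
caller passes a raw pointer, the wire format is an ExecutorAddr, and the
handler receives a typed pointer back.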
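Similarly, the TypeSwitch hunks above (SPIRVToLLVM.cpp, SPIRVDialect.cpp,
SPIRVTypes.cpp, SPIRVConversion.cpp) migrate from a Default lambda containing
llvm_unreachable plus a dummy return value to the newer
TypeSwitch::DefaultUnreachable. Below is a self-contained sketch of the idiom,
assuming LLVM headers are available; Shape, Circle, and Square are invented
stand-ins for the SPIR-V types in the patch, and only the TypeSwitch API comes
from it.

#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/Casting.h"
#include <cstdio>

struct Shape {
  enum Kind { K_Circle, K_Square } TheKind;
  Shape(Kind K) : TheKind(K) {}
};
struct Circle : Shape {
  Circle() : Shape(K_Circle) {}
  static bool classof(const Shape *S) { return S->TheKind == K_Circle; }
};
struct Square : Shape {
  Square() : Shape(K_Square) {}
  static bool classof(const Shape *S) { return S->TheKind == K_Square; }
};

static const char *name(Shape *S) {
  // Before: .Default([](Shape *) -> const char * {
  //           llvm_unreachable("unhandled shape kind"); });
  // After: DefaultUnreachable folds away the lambda and the dummy return.
  return llvm::TypeSwitch<Shape *, const char *>(S)
      .Case<Circle>([](Circle *) { return "circle"; })
      .Case<Square>([](Square *) { return "square"; })
      .DefaultUnreachable("unhandled shape kind");
}

int main() {
  Circle C;
  std::printf("%s\n", name(&C));
}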