74 files changed, 12645 insertions, 5645 deletions
diff --git a/.ci/monolithic-linux.sh b/.ci/monolithic-linux.sh index 84da045..75729b3 100755 --- a/.ci/monolithic-linux.sh +++ b/.ci/monolithic-linux.sh @@ -65,12 +65,12 @@ cmake -S "${MONOREPO_ROOT}"/llvm -B "${BUILD_DIR}" \ start-group "ninja" # Targets are not escaped as they are passed as separate arguments. -ninja -C "${BUILD_DIR}" -k 0 ${targets} +ninja -C "${BUILD_DIR}" -k 0 ${targets} |& tee ninja.log if [[ "${runtime_targets}" != "" ]]; then start-group "ninja Runtimes" - ninja -C "${BUILD_DIR}" ${runtime_targets} + ninja -C "${BUILD_DIR}" ${runtime_targets} |& tee ninja_runtimes.log fi # Compiling runtimes with just-built Clang and running their tests @@ -85,7 +85,8 @@ if [[ "${runtime_targets_needs_reconfig}" != "" ]]; then start-group "ninja Runtimes C++26" - ninja -C "${BUILD_DIR}" ${runtime_targets_needs_reconfig} + ninja -C "${BUILD_DIR}" ${runtime_targets_needs_reconfig} \ + |& tee ninja_runtimes_needs_reconfig1.log start-group "CMake Runtimes Clang Modules" @@ -96,5 +97,6 @@ if [[ "${runtime_targets_needs_reconfig}" != "" ]]; then start-group "ninja Runtimes Clang Modules" - ninja -C "${BUILD_DIR}" ${runtime_targets_needs_reconfig} + ninja -C "${BUILD_DIR}" ${runtime_targets_needs_reconfig} \ + |& tee ninja_runtimes_needs_reconfig2.log fi diff --git a/.ci/monolithic-windows.sh b/.ci/monolithic-windows.sh index b30faa4..0f3a199 100755 --- a/.ci/monolithic-windows.sh +++ b/.ci/monolithic-windows.sh @@ -51,4 +51,4 @@ cmake -S "${MONOREPO_ROOT}"/llvm -B "${BUILD_DIR}" \ start-group "ninja" # Targets are not escaped as they are passed as separate arguments. -ninja -C "${BUILD_DIR}" -k 0 ${targets} +ninja -C "${BUILD_DIR}" -k 0 ${targets} |& tee ninja.log diff --git a/bolt/include/bolt/Profile/DataAggregator.h b/bolt/include/bolt/Profile/DataAggregator.h index cb1b87f..db0f690 100644 --- a/bolt/include/bolt/Profile/DataAggregator.h +++ b/bolt/include/bolt/Profile/DataAggregator.h @@ -502,9 +502,6 @@ private: /// entries). void imputeFallThroughs(); - /// Register profiled functions for lite mode. 
- void registerProfiledFunctions(); - /// Debugging dump methods void dump() const; void dump(const PerfBranchSample &Sample) const; diff --git a/bolt/lib/Profile/DataAggregator.cpp b/bolt/lib/Profile/DataAggregator.cpp index c13fa6d..3604fdd 100644 --- a/bolt/lib/Profile/DataAggregator.cpp +++ b/bolt/lib/Profile/DataAggregator.cpp @@ -581,26 +581,6 @@ void DataAggregator::imputeFallThroughs() { outs() << "BOLT-INFO: imputed " << InferredTraces << " traces\n"; } -void DataAggregator::registerProfiledFunctions() { - DenseSet<uint64_t> Addrs; - for (const auto &Trace : llvm::make_first_range(Traces)) { - if (Trace.Branch != Trace::FT_ONLY && - Trace.Branch != Trace::FT_EXTERNAL_ORIGIN) - Addrs.insert(Trace.Branch); - Addrs.insert(Trace.From); - } - - for (const auto [PC, _] : BasicSamples) - Addrs.insert(PC); - - for (const PerfMemSample &MemSample : MemSamples) - Addrs.insert(MemSample.PC); - - for (const uint64_t Addr : Addrs) - if (BinaryFunction *Func = getBinaryFunctionContainingAddress(Addr)) - Func->setHasProfileAvailable(); -} - Error DataAggregator::preprocessProfile(BinaryContext &BC) { this->BC = &BC; @@ -623,7 +603,6 @@ Error DataAggregator::preprocessProfile(BinaryContext &BC) { exit(0); } - registerProfiledFunctions(); return Error::success(); } @@ -1368,6 +1347,10 @@ std::error_code DataAggregator::parseAggregatedLBREntry() { } const uint64_t FromOffset = Addr[0]->Offset; + BinaryFunction *FromFunc = getBinaryFunctionContainingAddress(FromOffset); + if (FromFunc) + FromFunc->setHasProfileAvailable(); + int64_t Count = Counters[0]; int64_t Mispreds = Counters[1]; @@ -1378,6 +1361,11 @@ std::error_code DataAggregator::parseAggregatedLBREntry() { return std::error_code(); } + const uint64_t ToOffset = Addr[1]->Offset; + BinaryFunction *ToFunc = getBinaryFunctionContainingAddress(ToOffset); + if (ToFunc) + ToFunc->setHasProfileAvailable(); + /// For fall-through types, adjust locations to match Trace container. if (Type == FT || Type == FT_EXTERNAL_ORIGIN || Type == FT_EXTERNAL_RETURN) { Addr[2] = Location(Addr[1]->Offset); // Trace To @@ -1625,6 +1613,9 @@ std::error_code DataAggregator::parseBranchEvents() { Traces.reserve(TraceMap.size()); for (const auto &[Trace, Info] : TraceMap) { Traces.emplace_back(Trace, Info); + for (const uint64_t Addr : {Trace.Branch, Trace.From}) + if (BinaryFunction *BF = getBinaryFunctionContainingAddress(Addr)) + BF->setHasProfileAvailable(); } clear(TraceMap); @@ -1685,6 +1676,9 @@ std::error_code DataAggregator::parseBasicEvents() { continue; ++NumTotalSamples; + if (BinaryFunction *BF = getBinaryFunctionContainingAddress(Sample->PC)) + BF->setHasProfileAvailable(); + ++BasicSamples[Sample->PC]; EventNames.insert(Sample->EventName); } @@ -1722,6 +1716,9 @@ std::error_code DataAggregator::parseMemEvents() { if (std::error_code EC = Sample.getError()) return EC; + if (BinaryFunction *BF = getBinaryFunctionContainingAddress(Sample->PC)) + BF->setHasProfileAvailable(); + MemSamples.emplace_back(std::move(Sample.get())); } diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h index 51ac1b6..b39eb15 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h @@ -113,6 +113,24 @@ class SizeClassAllocator64 { // ~(uptr)0. 
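The BOLT change above folds the removed registerProfiledFunctions() pass into the individual parsers: each trace, basic sample, and memory sample now marks its containing function as having profile data at the moment it is parsed, instead of collecting addresses into a DenseSet and registering them afterwards. A minimal host-side sketch of that pattern, with a hypothetical stub standing in for BinaryFunction and getBinaryFunctionContainingAddress:

#include <cstdint>
#include <map>

struct BinaryFunctionStub {
  uint64_t Address = 0;
  uint64_t Size = 0;
  bool HasProfile = false;
};

// Hypothetical analogue of getBinaryFunctionContainingAddress: find the
// function whose [Address, Address + Size) range contains Addr.
BinaryFunctionStub *containingFunction(std::map<uint64_t, BinaryFunctionStub> &Funcs,
                                       uint64_t Addr) {
  auto It = Funcs.upper_bound(Addr);
  if (It == Funcs.begin())
    return nullptr;
  --It;
  BinaryFunctionStub &BF = It->second;
  return Addr < BF.Address + BF.Size ? &BF : nullptr;
}

// Eagerly mark the function for an address while a sample is being parsed,
// mirroring the setHasProfileAvailable() calls added to the parsers above.
void markProfiled(std::map<uint64_t, BinaryFunctionStub> &Funcs, uint64_t Addr) {
  if (BinaryFunctionStub *BF = containingFunction(Funcs, Addr))
    BF->HasProfile = true;
}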
void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) { uptr TotalSpaceSize = kSpaceSize + AdditionalSize(); + + uptr MaxAddr = GetMaxUserVirtualAddress(); + // VReport does not call the sanitizer allocator. + VReport(3, "Max user virtual address: 0x%zx\n", MaxAddr); + VReport(3, "Total space size for primary allocator: 0x%zx\n", + TotalSpaceSize); + // TODO: revise the check if we ever configure sanitizers to deliberately + // map beyond the 2**48 barrier (note that Linux pretends the VMA is + // limited to 48-bit for backwards compatibility, but allows apps to + // explicitly specify an address beyond that). + if (heap_start + TotalSpaceSize >= MaxAddr) { + // We can't easily adjust the requested heap size, because kSpaceSize is + // const (for optimization) and used throughout the code. + VReport(0, "Error: heap size %zx exceeds max user virtual address %zx\n", + TotalSpaceSize, MaxAddr); + VReport( + 0, "Try using a kernel that allows a larger virtual address space\n"); + } PremappedHeap = heap_start != 0; if (PremappedHeap) { CHECK(!kUsingConstantSpaceBeg); diff --git a/libc/include/dlfcn.yaml b/libc/include/dlfcn.yaml index db2893a..bf17a11 100644 --- a/libc/include/dlfcn.yaml +++ b/libc/include/dlfcn.yaml @@ -46,7 +46,7 @@ enums: standards: - gnu value: 2 - - name: RTLD_DI_CONFIGADDR, + - name: RTLD_DI_CONFIGADDR standards: - gnu value: 3 @@ -127,5 +127,5 @@ functions: - POSIX return_type: int arguments: - - type: const void * - - type: Dl_info * + - type: const void *__restrict + - type: Dl_info *__restrict diff --git a/libc/src/dlfcn/dladdr.cpp b/libc/src/dlfcn/dladdr.cpp index 61490fd..3db68b4 100644 --- a/libc/src/dlfcn/dladdr.cpp +++ b/libc/src/dlfcn/dladdr.cpp @@ -14,7 +14,8 @@ namespace LIBC_NAMESPACE_DECL { // TODO: https:// github.com/llvm/llvm-project/issues/97929 -LLVM_LIBC_FUNCTION(int, dladdr, (const void *addr, Dl_info *info)) { +LLVM_LIBC_FUNCTION(int, dladdr, + (const void *__restrict addr, Dl_info *__restrict info)) { return -1; } diff --git a/libc/src/dlfcn/dladdr.h b/libc/src/dlfcn/dladdr.h index 346fc8d..abbc9a9 100644 --- a/libc/src/dlfcn/dladdr.h +++ b/libc/src/dlfcn/dladdr.h @@ -13,7 +13,7 @@ namespace LIBC_NAMESPACE_DECL { -int dladdr(const void *, Dl_info *); +int dladdr(const void *__restrict, Dl_info *__restrict); } // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/dlfcn/dlinfo.cpp b/libc/src/dlfcn/dlinfo.cpp index d78cade..e1938d1 100644 --- a/libc/src/dlfcn/dlinfo.cpp +++ b/libc/src/dlfcn/dlinfo.cpp @@ -16,7 +16,8 @@ namespace LIBC_NAMESPACE_DECL { // TODO: https://github.com/llvm/llvm-project/issues/149911 LLVM_LIBC_FUNCTION(int, dlinfo, - (void *restrict handle, int request, void *restrict info)) { + (void *__restrict handle, int request, + void *__restrict info)) { return -1; } diff --git a/libc/src/dlfcn/dlinfo.h b/libc/src/dlfcn/dlinfo.h index c2c34f0..bc13152 100644 --- a/libc/src/dlfcn/dlinfo.h +++ b/libc/src/dlfcn/dlinfo.h @@ -13,7 +13,7 @@ namespace LIBC_NAMESPACE_DECL { -int dlinfo(void *restrict, int, void *restrict); +int dlinfo(void *__restrict, int, void *__restrict); } // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/dlfcn/dlsym.cpp b/libc/src/dlfcn/dlsym.cpp index c075c20..dc0da7d 100644 --- a/libc/src/dlfcn/dlsym.cpp +++ b/libc/src/dlfcn/dlsym.cpp @@ -14,6 +14,8 @@ namespace LIBC_NAMESPACE_DECL { // TODO(@izaakschroeder): https://github.com/llvm/llvm-project/issues/97920 -LLVM_LIBC_FUNCTION(void *, dlsym, (void *, const char *)) { return nullptr; } +LLVM_LIBC_FUNCTION(void *, dlsym, (void *__restrict, const char 
*__restrict)) { + return nullptr; +} } // namespace LIBC_NAMESPACE_DECL diff --git a/libc/src/dlfcn/dlsym.h b/libc/src/dlfcn/dlsym.h index 70c6ab3..f879792 100644 --- a/libc/src/dlfcn/dlsym.h +++ b/libc/src/dlfcn/dlsym.h @@ -13,7 +13,7 @@ namespace LIBC_NAMESPACE_DECL { -void *dlsym(void *, const char *); +void *dlsym(void *__restrict, const char *__restrict); } // namespace LIBC_NAMESPACE_DECL diff --git a/llvm/include/llvm/BinaryFormat/DXContainer.h b/llvm/include/llvm/BinaryFormat/DXContainer.h index 89abca0..cc4af3d 100644 --- a/llvm/include/llvm/BinaryFormat/DXContainer.h +++ b/llvm/include/llvm/BinaryFormat/DXContainer.h @@ -16,6 +16,7 @@ #include "llvm/ADT/BitmaskEnum.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Compiler.h" +#include "llvm/Support/DXILABI.h" #include "llvm/Support/Error.h" #include "llvm/Support/SwapByteOrder.h" #include "llvm/TargetParser/Triple.h" @@ -157,6 +158,8 @@ enum class FeatureFlags : uint64_t { static_assert((uint64_t)FeatureFlags::NextUnusedBit <= 1ull << 63, "Shader flag bits exceed enum size."); +LLVM_ABI ArrayRef<EnumEntry<llvm::dxil::ResourceClass>> getResourceClasses(); + #define ROOT_SIGNATURE_FLAG(Num, Val) Val = Num, enum class RootFlags : uint32_t { #include "DXContainerConstants.def" diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h index b681ea8..f706591 100644 --- a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h +++ b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h @@ -31,6 +31,7 @@ namespace llvm { class CanonicalLoopInfo; +class ScanInfo; struct TargetRegionEntryInfo; class OffloadEntriesInfoManager; class OpenMPIRBuilder; @@ -707,6 +708,9 @@ public: LLVM_ABI InsertPointOrErrorTy createCancellationPoint( const LocationDescription &Loc, omp::Directive CanceledDirective); + /// Creates a ScanInfo object, allocates and returns the pointer. + Expected<ScanInfo *> scanInfoInitialize(); + /// Generator for '#omp parallel' /// /// \param Loc The insert and source location description. @@ -750,6 +754,42 @@ public: LoopBodyGenCallbackTy BodyGenCB, Value *TripCount, const Twine &Name = "loop"); + /// Generator for the control flow structure of an OpenMP canonical loops if + /// the parent directive has an `inscan` modifier specified. + /// If the `inscan` modifier is specified, the region of the parent is + /// expected to have a `scan` directive. Based on the clauses in + /// scan directive, the body of the loop is split into two loops: Input loop + /// and Scan Loop. Input loop contains the code generated for input phase of + /// scan and Scan loop contains the code generated for scan phase of scan. + /// From the bodyGen callback of these loops, `createScan` would be called + /// when a scan directive is encountered from the loop body. `createScan` + /// based on whether 1. inclusive or exclusive scan is specified and, 2. input + /// loop or scan loop is generated, lowers the body of the for loop + /// accordingly. + /// + /// \param Loc The insert and source location description. + /// \param BodyGenCB Callback that will generate the loop body code. + /// \param Start Value of the loop counter for the first iterations. + /// \param Stop Loop counter values past this will stop the loop. + /// \param Step Loop counter increment after each iteration; negative + /// means counting down. + /// \param IsSigned Whether Start, Stop and Step are signed integers. + /// \param InclusiveStop Whether \p Stop itself is a valid value for the loop + /// counter. 
+  /// \param ComputeIP Insertion point for instructions computing the trip
+  ///                  count. Can be used to ensure the trip count is available
+  ///                  at the outermost loop of a loop nest. If not set,
+  ///                  defaults to the preheader of the generated loop.
+  /// \param Name Base name used to derive BB and instruction names.
+  /// \param ScanRedInfo Pointer to the ScanInfo object created using
+  ///                    `scanInfoInitialize`.
+  ///
+  /// \returns A vector containing the CanonicalLoopInfo of the input loop and
+  ///          the scan loop.
+  Expected<SmallVector<llvm::CanonicalLoopInfo *>> createCanonicalScanLoops(
+      const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB,
+      Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop,
+      InsertPointTy ComputeIP, const Twine &Name, ScanInfo *ScanRedInfo);
+
 /// Calculate the trip count of a canonical loop.
 ///
 /// This allows specifying user-defined loop counter values using increment,
@@ -818,13 +858,17 @@ public:
 /// at the outermost loop of a loop nest. If not set,
 /// defaults to the preheader of the generated loop.
 /// \param Name Base name used to derive BB and instruction names.
+  /// \param InScan Whether the loop has a scan reduction specified.
+  /// \param ScanRedInfo Pointer to the ScanInfo object created using
+  ///                    `scanInfoInitialize`.
 ///
 /// \returns An object representing the created control flow structure which
 /// can be used for loop-associated directives.
 LLVM_ABI Expected<CanonicalLoopInfo *> createCanonicalLoop(
     const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB,
     Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop,
-    InsertPointTy ComputeIP = {}, const Twine &Name = "loop");
+    InsertPointTy ComputeIP = {}, const Twine &Name = "loop",
+    bool InScan = false, ScanInfo *ScanRedInfo = nullptr);

 /// Collapse a loop nest into a single loop.
 ///
@@ -1556,6 +1600,47 @@ private:
     ArrayRef<OpenMPIRBuilder::ReductionInfo> ReductionInfos,
     Function *ReduceFn, AttributeList FuncAttrs);

+  /// Helper function for createCanonicalScanLoops: creates the input loop
+  /// via \p InputLoopGen and the scan loop via \p ScanLoopGen.
+  /// \param InputLoopGen Callback that generates the loop for the input phase.
+  /// \param ScanLoopGen Callback that generates the loop for the scan phase.
+  /// \param ScanRedInfo Pointer to the ScanInfo object created using
+  ///                    `scanInfoInitialize`.
+  ///
+  /// \return An error if one is produced, else success.
+  Error emitScanBasedDirectiveIR(
+      llvm::function_ref<Error()> InputLoopGen,
+      llvm::function_ref<Error(LocationDescription Loc)> ScanLoopGen,
+      ScanInfo *ScanRedInfo);
+
+  /// Creates the basic blocks required for scan reduction.
+  /// \param ScanRedInfo Pointer to the ScanInfo object created using
+  ///                    `scanInfoInitialize`.
+  void createScanBBs(ScanInfo *ScanRedInfo);
+
+  /// Dynamically allocates the buffer needed for scan reduction.
+  /// \param AllocaIP The IP where the possibly-shared buffer pointer needs to
+  ///                 be declared.
+  /// \param ScanVars The scan variables.
+  /// \param ScanVarsType Types of the scan variables.
+  /// \param ScanRedInfo Pointer to the ScanInfo object created using
+  ///                    `scanInfoInitialize`.
+  ///
+  /// \return An error if one is produced, else success.
+  Error emitScanBasedDirectiveDeclsIR(InsertPointTy AllocaIP,
+                                      ArrayRef<llvm::Value *> ScanVars,
+                                      ArrayRef<llvm::Type *> ScanVarsType,
+                                      ScanInfo *ScanRedInfo);
+
+  /// Copies the result back to the reduction variable.
+  /// \param ReductionInfos Array type containing the ReductionOps.
+  /// \param ScanRedInfo Pointer to the ScanInfo object created using
+  ///                    `scanInfoInitialize`.
+  ///
+  /// \return An error if one is produced, else success.
+  Error emitScanBasedDirectiveFinalsIR(
+      ArrayRef<llvm::OpenMPIRBuilder::ReductionInfo> ReductionInfos,
+      ScanInfo *ScanInfo);
+
 /// This function emits a helper that gathers Reduce lists from the first
 /// lane of every active warp to lanes in the first warp.
 ///
@@ -2184,6 +2269,9 @@ public:
 /// free'd.
 std::forward_list<CanonicalLoopInfo> LoopInfos;

+  /// Collection of owned ScanInfo objects that eventually need to be free'd.
+  std::forward_list<ScanInfo> ScanInfos;
+
 /// Add a new region that will be outlined later.
 void addOutlineInfo(OutlineInfo &&OI) { OutlineInfos.emplace_back(OI); }

@@ -2639,6 +2727,48 @@ public:
                FinalizeCallbackTy FiniCB, Value *Filter);

+  /// This function performs the scan reduction of the values updated in
+  /// the input phase. The reduction logic needs to be emitted between the
+  /// input and scan loops returned by `createCanonicalScanLoops`. The
+  /// following is the code that is generated; `buffer` and `span` are
+  /// expected to be populated before executing the generated code.
+  /// \code{c}
+  /// for (int k = 0; k != ceil(log2(span)); ++k) {
+  ///   i = pow(2, k);
+  ///   for (size cnt = last_iter; cnt >= i; --cnt)
+  ///     buffer[cnt] op= buffer[cnt-i];
+  /// }
+  /// \endcode
+  /// \param Loc The insert and source location description.
+  /// \param ReductionInfos Array type containing the ReductionOps.
+  /// \param ScanRedInfo Pointer to the ScanInfo object created using
+  ///                    `scanInfoInitialize`.
+  ///
+  /// \returns The insertion position *after* the masked region.
+  InsertPointOrErrorTy emitScanReduction(
+      const LocationDescription &Loc,
+      ArrayRef<llvm::OpenMPIRBuilder::ReductionInfo> ReductionInfos,
+      ScanInfo *ScanRedInfo);
+
+  /// Lowers the scan directive: splits the loop body and directs the control
+  /// flow to the input-phase blocks or the scan-phase blocks based on
+  /// (1) whether the input loop or the scan loop is executed, and (2) whether
+  /// an exclusive or inclusive scan is used.
+  ///
+  /// \param Loc The insert and source location description.
+  /// \param AllocaIP The IP where the temporary buffer for scan reduction
+  ///                 needs to be allocated.
+  /// \param ScanVars The scan variables.
+  /// \param ScanVarsType Types of the scan variables.
+  /// \param IsInclusive Whether it is an inclusive or exclusive scan.
+  /// \param ScanRedInfo Pointer to the ScanInfo object created using
+  ///                    `scanInfoInitialize`.
+  ///
+  /// \returns The insertion position *after* the scan directive.
+  InsertPointOrErrorTy createScan(const LocationDescription &Loc,
+                                  InsertPointTy AllocaIP,
+                                  ArrayRef<llvm::Value *> ScanVars,
+                                  ArrayRef<llvm::Type *> ScanVarsType,
+                                  bool IsInclusive, ScanInfo *ScanRedInfo);
+
 /// Generator for '#omp critical'
 ///
 /// \param Loc The insert and source location description.
@@ -3779,6 +3909,93 @@ public:
   LLVM_ABI void invalidate();
 };

+/// ScanInfo holds the information needed to assist in the lowering of scan
+/// reductions. Before lowering, the body of the for loop specifying a scan
+/// reduction is expected to have the following structure:
+///
+///          Loop Body Entry
+///                |
+///   Code before the scan directive
+///                |
+///          Scan Directive
+///                |
+///   Code after the scan directive
+///                |
+///          Loop Body Exit
+///
+/// When `createCanonicalScanLoops` is executed, its bodyGen callback
+/// transforms the body to:
+///
+///          Loop Body Entry
+///                |
+///         OMPScanDispatch
+///
+///       OMPBeforeScanBlock
+///                |
+///         OMPScanLoopExit
+///                |
+///          Loop Body Exit
+///
+/// (OMPScanDispatch has no terminator at this point; `createScan` later adds
+/// its branch to OMPBeforeScanBlock or OMPAfterScanBlock.)
+/// The insert point is updated to the first insert point of
+/// OMPBeforeScanBlock.
+/// It dominates the control flow of the code generated until the scan
+/// directive is encountered, and OMPAfterScanBlock dominates the control
+/// flow of the code generated after the scan directive is encountered. The
+/// successor of OMPScanDispatch can be OMPBeforeScanBlock or
+/// OMPAfterScanBlock based on (1) whether it is the input phase or the scan
+/// phase, and (2) whether it is an exclusive or inclusive scan. This jump is
+/// added when `createScan` is executed. While the input loop is generated,
+/// `OMPBeforeScanBlock` succeeds `OMPScanDispatch` for an inclusive scan and
+/// `OMPAfterScanBlock` succeeds it for an exclusive scan; the targets are
+/// swapped for the scan loop. At the end of the input loop, the temporary
+/// buffer is populated, and at the beginning of the scan loop, the temporary
+/// buffer is read. After the scan directive is encountered, the insertion
+/// point is updated to `OMPAfterScanBlock`, as it is expected to dominate
+/// the code after the scan directive. Both the before- and after-scan blocks
+/// are succeeded by `OMPScanLoopExit`. Temporary buffer allocations are done
+/// in the `OMPScanInit` block before the lowering of the for-loop. The
+/// results are copied back to the reduction variable in the `OMPScanFinish`
+/// block.
+class ScanInfo {
+public:
+  /// Dominates the body of the loop before the scan directive
+  llvm::BasicBlock *OMPBeforeScanBlock = nullptr;
+
+  /// Dominates the body of the loop after the scan directive
+  llvm::BasicBlock *OMPAfterScanBlock = nullptr;
+
+  /// Controls the flow to the before- or after-scan blocks
+  llvm::BasicBlock *OMPScanDispatch = nullptr;
+
+  /// Exit block of the loop body
+  llvm::BasicBlock *OMPScanLoopExit = nullptr;
+
+  /// Block before the loop body where scan initializations are done
+  llvm::BasicBlock *OMPScanInit = nullptr;
+
+  /// Block after the loop body where scan finalizations are done
+  llvm::BasicBlock *OMPScanFinish = nullptr;
+
+  /// If true, the input phase is being lowered; otherwise the scan phase is
+  /// being lowered
+  bool OMPFirstScanLoop = false;
+
+  /// Maps each private reduction variable to the pointer of its temporary
+  /// buffer
+  llvm::SmallDenseMap<llvm::Value *, llvm::Value *> *ScanBuffPtrs;
+
+  /// Keeps track of the value of the iteration variable for the input/scan
+  /// loop, used when lowering the scan directive
+  llvm::Value *IV;
+
+  /// Stores the span (trip count) of the canonical loop being lowered, used
+  /// for temporary buffer allocation and finalization.
+ llvm::Value *Span; + + ScanInfo() { + ScanBuffPtrs = new llvm::SmallDenseMap<llvm::Value *, llvm::Value *>(); + } + + ~ScanInfo() { delete (ScanBuffPtrs); } +}; + } // end namespace llvm #endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H diff --git a/llvm/lib/BinaryFormat/DXContainer.cpp b/llvm/lib/BinaryFormat/DXContainer.cpp index 36d10d0..eb83945 100644 --- a/llvm/lib/BinaryFormat/DXContainer.cpp +++ b/llvm/lib/BinaryFormat/DXContainer.cpp @@ -60,6 +60,17 @@ ArrayRef<EnumEntry<SigComponentType>> dxbc::getSigComponentTypes() { return ArrayRef(SigComponentTypes); } +static const EnumEntry<dxil::ResourceClass> ResourceClassNames[] = { + {"SRV", llvm::dxil::ResourceClass::SRV}, + {"UAV", llvm::dxil::ResourceClass::UAV}, + {"CBV", llvm::dxil::ResourceClass::CBuffer}, + {"Sampler", llvm::dxil::ResourceClass::Sampler}, +}; + +ArrayRef<EnumEntry<llvm::dxil::ResourceClass>> dxbc::getResourceClasses() { + return ArrayRef(ResourceClassNames); +} + static const EnumEntry<RootFlags> RootFlagNames[] = { #define ROOT_SIGNATURE_FLAG(Val, Enum) {#Enum, RootFlags::Enum}, #include "llvm/BinaryFormat/DXContainerConstants.def" diff --git a/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp b/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp index 79904fc..574883e 100644 --- a/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp +++ b/llvm/lib/Frontend/HLSL/HLSLRootSignature.cpp @@ -92,16 +92,9 @@ static raw_ostream &operator<<(raw_ostream &OS, return OS; } -static const EnumEntry<dxil::ResourceClass> ResourceClassNames[] = { - {"CBV", dxil::ResourceClass::CBuffer}, - {"SRV", dxil::ResourceClass::SRV}, - {"UAV", dxil::ResourceClass::UAV}, - {"Sampler", dxil::ResourceClass::Sampler}, -}; - static raw_ostream &operator<<(raw_ostream &OS, const ClauseType &Type) { OS << enumToStringRef(dxil::ResourceClass(llvm::to_underlying(Type)), - ArrayRef(ResourceClassNames)); + dxbc::getResourceClasses()); return OS; } diff --git a/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp b/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp index 9cf4ed1..1cda308 100644 --- a/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp +++ b/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp @@ -51,13 +51,6 @@ static std::optional<StringRef> extractMdStringValue(MDNode *Node, return NodeText->getString(); } -static const EnumEntry<dxil::ResourceClass> ResourceClassNames[] = { - {"CBV", dxil::ResourceClass::CBuffer}, - {"SRV", dxil::ResourceClass::SRV}, - {"UAV", dxil::ResourceClass::UAV}, - {"Sampler", dxil::ResourceClass::Sampler}, -}; - namespace { // We use the OverloadVisit with std::visit to ensure the compiler catches if a @@ -128,7 +121,7 @@ MDNode *MetadataBuilder::BuildRootDescriptor(const RootDescriptor &Descriptor) { IRBuilder<> Builder(Ctx); StringRef ResName = enumToStringRef(dxil::ResourceClass(to_underlying(Descriptor.Type)), - ArrayRef(ResourceClassNames)); + dxbc::getResourceClasses()); assert(!ResName.empty() && "Provided an invalid Resource Class"); SmallString<7> Name({"Root", ResName}); Metadata *Operands[] = { @@ -170,7 +163,7 @@ MDNode *MetadataBuilder::BuildDescriptorTableClause( IRBuilder<> Builder(Ctx); StringRef ResName = enumToStringRef(dxil::ResourceClass(to_underlying(Clause.Type)), - ArrayRef(ResourceClassNames)); + dxbc::getResourceClasses()); assert(!ResName.empty() && "Provided an invalid Resource Class"); Metadata *Operands[] = { MDString::get(Ctx, ResName), diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp index 260d3c2..ea027e4 100644 --- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp 
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp @@ -4014,6 +4014,340 @@ OpenMPIRBuilder::createMasked(const LocationDescription &Loc, /*Conditional*/ true, /*hasFinalize*/ true); } +static llvm::CallInst *emitNoUnwindRuntimeCall(IRBuilder<> &Builder, + llvm::FunctionCallee Callee, + ArrayRef<llvm::Value *> Args, + const llvm::Twine &Name) { + llvm::CallInst *Call = Builder.CreateCall( + Callee, Args, SmallVector<llvm::OperandBundleDef, 1>(), Name); + Call->setDoesNotThrow(); + return Call; +} + +// Expects input basic block is dominated by BeforeScanBB. +// Once Scan directive is encountered, the code after scan directive should be +// dominated by AfterScanBB. Scan directive splits the code sequence to +// scan and input phase. Based on whether inclusive or exclusive +// clause is used in the scan directive and whether input loop or scan loop +// is lowered, it adds jumps to input and scan phase. First Scan loop is the +// input loop and second is the scan loop. The code generated handles only +// inclusive scans now. +OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createScan( + const LocationDescription &Loc, InsertPointTy AllocaIP, + ArrayRef<llvm::Value *> ScanVars, ArrayRef<llvm::Type *> ScanVarsType, + bool IsInclusive, ScanInfo *ScanRedInfo) { + if (ScanRedInfo->OMPFirstScanLoop) { + llvm::Error Err = emitScanBasedDirectiveDeclsIR(AllocaIP, ScanVars, + ScanVarsType, ScanRedInfo); + if (Err) + return Err; + } + if (!updateToLocation(Loc)) + return Loc.IP; + + llvm::Value *IV = ScanRedInfo->IV; + + if (ScanRedInfo->OMPFirstScanLoop) { + // Emit buffer[i] = red; at the end of the input phase. + for (size_t i = 0; i < ScanVars.size(); i++) { + Value *BuffPtr = (*(ScanRedInfo->ScanBuffPtrs))[ScanVars[i]]; + Value *Buff = Builder.CreateLoad(Builder.getPtrTy(), BuffPtr); + Type *DestTy = ScanVarsType[i]; + Value *Val = Builder.CreateInBoundsGEP(DestTy, Buff, IV, "arrayOffset"); + Value *Src = Builder.CreateLoad(DestTy, ScanVars[i]); + + Builder.CreateStore(Src, Val); + } + } + Builder.CreateBr(ScanRedInfo->OMPScanLoopExit); + emitBlock(ScanRedInfo->OMPScanDispatch, + Builder.GetInsertBlock()->getParent()); + + if (!ScanRedInfo->OMPFirstScanLoop) { + IV = ScanRedInfo->IV; + // Emit red = buffer[i]; at the entrance to the scan phase. + // TODO: if exclusive scan, the red = buffer[i-1] needs to be updated. + for (size_t i = 0; i < ScanVars.size(); i++) { + Value *BuffPtr = (*(ScanRedInfo->ScanBuffPtrs))[ScanVars[i]]; + Value *Buff = Builder.CreateLoad(Builder.getPtrTy(), BuffPtr); + Type *DestTy = ScanVarsType[i]; + Value *SrcPtr = + Builder.CreateInBoundsGEP(DestTy, Buff, IV, "arrayOffset"); + Value *Src = Builder.CreateLoad(DestTy, SrcPtr); + Builder.CreateStore(Src, ScanVars[i]); + } + } + + // TODO: Update it to CreateBr and remove dead blocks + llvm::Value *CmpI = Builder.getInt1(true); + if (ScanRedInfo->OMPFirstScanLoop == IsInclusive) { + Builder.CreateCondBr(CmpI, ScanRedInfo->OMPBeforeScanBlock, + ScanRedInfo->OMPAfterScanBlock); + } else { + Builder.CreateCondBr(CmpI, ScanRedInfo->OMPAfterScanBlock, + ScanRedInfo->OMPBeforeScanBlock); + } + emitBlock(ScanRedInfo->OMPAfterScanBlock, + Builder.GetInsertBlock()->getParent()); + Builder.SetInsertPoint(ScanRedInfo->OMPAfterScanBlock); + return Builder.saveIP(); +} + +Error OpenMPIRBuilder::emitScanBasedDirectiveDeclsIR( + InsertPointTy AllocaIP, ArrayRef<Value *> ScanVars, + ArrayRef<Type *> ScanVarsType, ScanInfo *ScanRedInfo) { + + Builder.restoreIP(AllocaIP); + // Create the shared pointer at alloca IP. 
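The buffer stores and loads above (buffer[i] = red at the end of the input phase, red = buffer[i] at the start of the scan phase) only produce prefix results because emitScanReduction later rewrites `buffer` in place using the doubling scheme quoted in its header comment. A self-contained sketch of that scheme for a plain integer sum, illustrative only; the builder emits the equivalent IR with the user's reduction operator:

#include <cstddef>
#include <vector>

// In-place inclusive prefix sum via the doubling scheme: pass k combines
// each element with the one 2^k positions earlier, so after ceil(log2(N))
// passes Buffer[i] holds the sum of Buffer[0..i]. Walking Cnt from the back
// ensures every read in a pass sees values from the previous pass.
void inclusiveScan(std::vector<int> &Buffer) {
  size_t N = Buffer.size();
  for (size_t Stride = 1; Stride < N; Stride *= 2)
    for (size_t Cnt = N - 1; Cnt >= Stride; --Cnt)
      Buffer[Cnt] += Buffer[Cnt - Stride];
}

For example, {1, 1, 1, 1} becomes {1, 2, 2, 2} after the first pass and {1, 2, 3, 4} after the second.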
+ for (size_t i = 0; i < ScanVars.size(); i++) { + llvm::Value *BuffPtr = + Builder.CreateAlloca(Builder.getPtrTy(), nullptr, "vla"); + (*(ScanRedInfo->ScanBuffPtrs))[ScanVars[i]] = BuffPtr; + } + + // Allocate temporary buffer by master thread + auto BodyGenCB = [&](InsertPointTy AllocaIP, + InsertPointTy CodeGenIP) -> Error { + Builder.restoreIP(CodeGenIP); + Value *AllocSpan = + Builder.CreateAdd(ScanRedInfo->Span, Builder.getInt32(1)); + for (size_t i = 0; i < ScanVars.size(); i++) { + Type *IntPtrTy = Builder.getInt32Ty(); + Constant *Allocsize = ConstantExpr::getSizeOf(ScanVarsType[i]); + Allocsize = ConstantExpr::getTruncOrBitCast(Allocsize, IntPtrTy); + Value *Buff = Builder.CreateMalloc(IntPtrTy, ScanVarsType[i], Allocsize, + AllocSpan, nullptr, "arr"); + Builder.CreateStore(Buff, (*(ScanRedInfo->ScanBuffPtrs))[ScanVars[i]]); + } + return Error::success(); + }; + // TODO: Perform finalization actions for variables. This has to be + // called for variables which have destructors/finalizers. + auto FiniCB = [&](InsertPointTy CodeGenIP) { return llvm::Error::success(); }; + + Builder.SetInsertPoint(ScanRedInfo->OMPScanInit->getTerminator()); + llvm::Value *FilterVal = Builder.getInt32(0); + llvm::OpenMPIRBuilder::InsertPointOrErrorTy AfterIP = + createMasked(Builder.saveIP(), BodyGenCB, FiniCB, FilterVal); + + if (!AfterIP) + return AfterIP.takeError(); + Builder.restoreIP(*AfterIP); + BasicBlock *InputBB = Builder.GetInsertBlock(); + if (InputBB->getTerminator()) + Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator()); + AfterIP = createBarrier(Builder.saveIP(), llvm::omp::OMPD_barrier); + if (!AfterIP) + return AfterIP.takeError(); + Builder.restoreIP(*AfterIP); + + return Error::success(); +} + +Error OpenMPIRBuilder::emitScanBasedDirectiveFinalsIR( + ArrayRef<ReductionInfo> ReductionInfos, ScanInfo *ScanRedInfo) { + auto BodyGenCB = [&](InsertPointTy AllocaIP, + InsertPointTy CodeGenIP) -> Error { + Builder.restoreIP(CodeGenIP); + for (ReductionInfo RedInfo : ReductionInfos) { + Value *PrivateVar = RedInfo.PrivateVariable; + Value *OrigVar = RedInfo.Variable; + Value *BuffPtr = (*(ScanRedInfo->ScanBuffPtrs))[PrivateVar]; + Value *Buff = Builder.CreateLoad(Builder.getPtrTy(), BuffPtr); + + Type *SrcTy = RedInfo.ElementType; + Value *Val = Builder.CreateInBoundsGEP(SrcTy, Buff, ScanRedInfo->Span, + "arrayOffset"); + Value *Src = Builder.CreateLoad(SrcTy, Val); + + Builder.CreateStore(Src, OrigVar); + Builder.CreateFree(Buff); + } + return Error::success(); + }; + // TODO: Perform finalization actions for variables. This has to be + // called for variables which have destructors/finalizers. 
+ auto FiniCB = [&](InsertPointTy CodeGenIP) { return llvm::Error::success(); }; + + if (ScanRedInfo->OMPScanFinish->getTerminator()) + Builder.SetInsertPoint(ScanRedInfo->OMPScanFinish->getTerminator()); + else + Builder.SetInsertPoint(ScanRedInfo->OMPScanFinish); + + llvm::Value *FilterVal = Builder.getInt32(0); + llvm::OpenMPIRBuilder::InsertPointOrErrorTy AfterIP = + createMasked(Builder.saveIP(), BodyGenCB, FiniCB, FilterVal); + + if (!AfterIP) + return AfterIP.takeError(); + Builder.restoreIP(*AfterIP); + BasicBlock *InputBB = Builder.GetInsertBlock(); + if (InputBB->getTerminator()) + Builder.SetInsertPoint(Builder.GetInsertBlock()->getTerminator()); + AfterIP = createBarrier(Builder.saveIP(), llvm::omp::OMPD_barrier); + if (!AfterIP) + return AfterIP.takeError(); + Builder.restoreIP(*AfterIP); + return Error::success(); +} + +OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::emitScanReduction( + const LocationDescription &Loc, + ArrayRef<llvm::OpenMPIRBuilder::ReductionInfo> ReductionInfos, + ScanInfo *ScanRedInfo) { + + if (!updateToLocation(Loc)) + return Loc.IP; + auto BodyGenCB = [&](InsertPointTy AllocaIP, + InsertPointTy CodeGenIP) -> Error { + Builder.restoreIP(CodeGenIP); + Function *CurFn = Builder.GetInsertBlock()->getParent(); + // for (int k = 0; k <= ceil(log2(n)); ++k) + llvm::BasicBlock *LoopBB = + BasicBlock::Create(CurFn->getContext(), "omp.outer.log.scan.body"); + llvm::BasicBlock *ExitBB = + splitBB(Builder, false, "omp.outer.log.scan.exit"); + llvm::Function *F = llvm::Intrinsic::getOrInsertDeclaration( + Builder.GetInsertBlock()->getModule(), + (llvm::Intrinsic::ID)llvm::Intrinsic::log2, Builder.getDoubleTy()); + llvm::BasicBlock *InputBB = Builder.GetInsertBlock(); + llvm::Value *Arg = + Builder.CreateUIToFP(ScanRedInfo->Span, Builder.getDoubleTy()); + llvm::Value *LogVal = emitNoUnwindRuntimeCall(Builder, F, Arg, ""); + F = llvm::Intrinsic::getOrInsertDeclaration( + Builder.GetInsertBlock()->getModule(), + (llvm::Intrinsic::ID)llvm::Intrinsic::ceil, Builder.getDoubleTy()); + LogVal = emitNoUnwindRuntimeCall(Builder, F, LogVal, ""); + LogVal = Builder.CreateFPToUI(LogVal, Builder.getInt32Ty()); + llvm::Value *NMin1 = Builder.CreateNUWSub( + ScanRedInfo->Span, + llvm::ConstantInt::get(ScanRedInfo->Span->getType(), 1)); + Builder.SetInsertPoint(InputBB); + Builder.CreateBr(LoopBB); + emitBlock(LoopBB, CurFn); + Builder.SetInsertPoint(LoopBB); + + PHINode *Counter = Builder.CreatePHI(Builder.getInt32Ty(), 2); + // size pow2k = 1; + PHINode *Pow2K = Builder.CreatePHI(Builder.getInt32Ty(), 2); + Counter->addIncoming(llvm::ConstantInt::get(Builder.getInt32Ty(), 0), + InputBB); + Pow2K->addIncoming(llvm::ConstantInt::get(Builder.getInt32Ty(), 1), + InputBB); + // for (size i = n - 1; i >= 2 ^ k; --i) + // tmp[i] op= tmp[i-pow2k]; + llvm::BasicBlock *InnerLoopBB = + BasicBlock::Create(CurFn->getContext(), "omp.inner.log.scan.body"); + llvm::BasicBlock *InnerExitBB = + BasicBlock::Create(CurFn->getContext(), "omp.inner.log.scan.exit"); + llvm::Value *CmpI = Builder.CreateICmpUGE(NMin1, Pow2K); + Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB); + emitBlock(InnerLoopBB, CurFn); + Builder.SetInsertPoint(InnerLoopBB); + PHINode *IVal = Builder.CreatePHI(Builder.getInt32Ty(), 2); + IVal->addIncoming(NMin1, LoopBB); + for (ReductionInfo RedInfo : ReductionInfos) { + Value *ReductionVal = RedInfo.PrivateVariable; + Value *BuffPtr = (*(ScanRedInfo->ScanBuffPtrs))[ReductionVal]; + Value *Buff = Builder.CreateLoad(Builder.getPtrTy(), BuffPtr); + Type *DestTy = 
RedInfo.ElementType; + Value *IV = Builder.CreateAdd(IVal, Builder.getInt32(1)); + Value *LHSPtr = + Builder.CreateInBoundsGEP(DestTy, Buff, IV, "arrayOffset"); + Value *OffsetIval = Builder.CreateNUWSub(IV, Pow2K); + Value *RHSPtr = + Builder.CreateInBoundsGEP(DestTy, Buff, OffsetIval, "arrayOffset"); + Value *LHS = Builder.CreateLoad(DestTy, LHSPtr); + Value *RHS = Builder.CreateLoad(DestTy, RHSPtr); + llvm::Value *Result; + InsertPointOrErrorTy AfterIP = + RedInfo.ReductionGen(Builder.saveIP(), LHS, RHS, Result); + if (!AfterIP) + return AfterIP.takeError(); + Builder.CreateStore(Result, LHSPtr); + } + llvm::Value *NextIVal = Builder.CreateNUWSub( + IVal, llvm::ConstantInt::get(Builder.getInt32Ty(), 1)); + IVal->addIncoming(NextIVal, Builder.GetInsertBlock()); + CmpI = Builder.CreateICmpUGE(NextIVal, Pow2K); + Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB); + emitBlock(InnerExitBB, CurFn); + llvm::Value *Next = Builder.CreateNUWAdd( + Counter, llvm::ConstantInt::get(Counter->getType(), 1)); + Counter->addIncoming(Next, Builder.GetInsertBlock()); + // pow2k <<= 1; + llvm::Value *NextPow2K = Builder.CreateShl(Pow2K, 1, "", /*HasNUW=*/true); + Pow2K->addIncoming(NextPow2K, Builder.GetInsertBlock()); + llvm::Value *Cmp = Builder.CreateICmpNE(Next, LogVal); + Builder.CreateCondBr(Cmp, LoopBB, ExitBB); + Builder.SetInsertPoint(ExitBB->getFirstInsertionPt()); + return Error::success(); + }; + + // TODO: Perform finalization actions for variables. This has to be + // called for variables which have destructors/finalizers. + auto FiniCB = [&](InsertPointTy CodeGenIP) { return llvm::Error::success(); }; + + llvm::Value *FilterVal = Builder.getInt32(0); + llvm::OpenMPIRBuilder::InsertPointOrErrorTy AfterIP = + createMasked(Builder.saveIP(), BodyGenCB, FiniCB, FilterVal); + + if (!AfterIP) + return AfterIP.takeError(); + Builder.restoreIP(*AfterIP); + AfterIP = createBarrier(Builder.saveIP(), llvm::omp::OMPD_barrier); + + if (!AfterIP) + return AfterIP.takeError(); + Builder.restoreIP(*AfterIP); + Error Err = emitScanBasedDirectiveFinalsIR(ReductionInfos, ScanRedInfo); + if (Err) + return Err; + + return AfterIP; +} + +Error OpenMPIRBuilder::emitScanBasedDirectiveIR( + llvm::function_ref<Error()> InputLoopGen, + llvm::function_ref<Error(LocationDescription Loc)> ScanLoopGen, + ScanInfo *ScanRedInfo) { + + { + // Emit loop with input phase: + // for (i: 0..<num_iters>) { + // <input phase>; + // buffer[i] = red; + // } + ScanRedInfo->OMPFirstScanLoop = true; + Error Err = InputLoopGen(); + if (Err) + return Err; + } + { + // Emit loop with scan phase: + // for (i: 0..<num_iters>) { + // red = buffer[i]; + // <scan phase>; + // } + ScanRedInfo->OMPFirstScanLoop = false; + Error Err = ScanLoopGen(Builder.saveIP()); + if (Err) + return Err; + } + return Error::success(); +} + +void OpenMPIRBuilder::createScanBBs(ScanInfo *ScanRedInfo) { + Function *Fun = Builder.GetInsertBlock()->getParent(); + ScanRedInfo->OMPScanDispatch = + BasicBlock::Create(Fun->getContext(), "omp.inscan.dispatch"); + ScanRedInfo->OMPAfterScanBlock = + BasicBlock::Create(Fun->getContext(), "omp.after.scan.bb"); + ScanRedInfo->OMPBeforeScanBlock = + BasicBlock::Create(Fun->getContext(), "omp.before.scan.bb"); + ScanRedInfo->OMPScanLoopExit = + BasicBlock::Create(Fun->getContext(), "omp.scan.loop.exit"); +} CanonicalLoopInfo *OpenMPIRBuilder::createLoopSkeleton( DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore, BasicBlock *PostInsertBefore, const Twine &Name) { @@ -4111,6 +4445,76 @@ 
OpenMPIRBuilder::createCanonicalLoop(const LocationDescription &Loc, return CL; } +Expected<ScanInfo *> OpenMPIRBuilder::scanInfoInitialize() { + ScanInfos.emplace_front(); + ScanInfo *Result = &ScanInfos.front(); + return Result; +} + +Expected<SmallVector<llvm::CanonicalLoopInfo *>> +OpenMPIRBuilder::createCanonicalScanLoops( + const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, + Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop, + InsertPointTy ComputeIP, const Twine &Name, ScanInfo *ScanRedInfo) { + LocationDescription ComputeLoc = + ComputeIP.isSet() ? LocationDescription(ComputeIP, Loc.DL) : Loc; + updateToLocation(ComputeLoc); + + SmallVector<CanonicalLoopInfo *> Result; + + Value *TripCount = calculateCanonicalLoopTripCount( + ComputeLoc, Start, Stop, Step, IsSigned, InclusiveStop, Name); + ScanRedInfo->Span = TripCount; + ScanRedInfo->OMPScanInit = splitBB(Builder, true, "scan.init"); + Builder.SetInsertPoint(ScanRedInfo->OMPScanInit); + + auto BodyGen = [=](InsertPointTy CodeGenIP, Value *IV) { + Builder.restoreIP(CodeGenIP); + ScanRedInfo->IV = IV; + createScanBBs(ScanRedInfo); + BasicBlock *InputBlock = Builder.GetInsertBlock(); + Instruction *Terminator = InputBlock->getTerminator(); + assert(Terminator->getNumSuccessors() == 1); + BasicBlock *ContinueBlock = Terminator->getSuccessor(0); + Terminator->setSuccessor(0, ScanRedInfo->OMPScanDispatch); + emitBlock(ScanRedInfo->OMPBeforeScanBlock, + Builder.GetInsertBlock()->getParent()); + Builder.CreateBr(ScanRedInfo->OMPScanLoopExit); + emitBlock(ScanRedInfo->OMPScanLoopExit, + Builder.GetInsertBlock()->getParent()); + Builder.CreateBr(ContinueBlock); + Builder.SetInsertPoint( + ScanRedInfo->OMPBeforeScanBlock->getFirstInsertionPt()); + return BodyGenCB(Builder.saveIP(), IV); + }; + + const auto &&InputLoopGen = [&]() -> Error { + Expected<CanonicalLoopInfo *> LoopInfo = createCanonicalLoop( + Builder.saveIP(), BodyGen, Start, Stop, Step, IsSigned, InclusiveStop, + ComputeIP, Name, true, ScanRedInfo); + if (!LoopInfo) + return LoopInfo.takeError(); + Result.push_back(*LoopInfo); + Builder.restoreIP((*LoopInfo)->getAfterIP()); + return Error::success(); + }; + const auto &&ScanLoopGen = [&](LocationDescription Loc) -> Error { + Expected<CanonicalLoopInfo *> LoopInfo = + createCanonicalLoop(Loc, BodyGen, Start, Stop, Step, IsSigned, + InclusiveStop, ComputeIP, Name, true, ScanRedInfo); + if (!LoopInfo) + return LoopInfo.takeError(); + Result.push_back(*LoopInfo); + Builder.restoreIP((*LoopInfo)->getAfterIP()); + ScanRedInfo->OMPScanFinish = Builder.GetInsertBlock(); + return Error::success(); + }; + Error Err = emitScanBasedDirectiveIR(InputLoopGen, ScanLoopGen, ScanRedInfo); + if (Err) + return Err; + return Result; +} + Value *OpenMPIRBuilder::calculateCanonicalLoopTripCount( const LocationDescription &Loc, Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop, const Twine &Name) { @@ -4174,7 +4578,8 @@ Value *OpenMPIRBuilder::calculateCanonicalLoopTripCount( Expected<CanonicalLoopInfo *> OpenMPIRBuilder::createCanonicalLoop( const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop, - InsertPointTy ComputeIP, const Twine &Name) { + InsertPointTy ComputeIP, const Twine &Name, bool InScan, + ScanInfo *ScanRedInfo) { LocationDescription ComputeLoc = ComputeIP.isSet() ? 
LocationDescription(ComputeIP, Loc.DL) : Loc; @@ -4185,6 +4590,8 @@ Expected<CanonicalLoopInfo *> OpenMPIRBuilder::createCanonicalLoop( Builder.restoreIP(CodeGenIP); Value *Span = Builder.CreateMul(IV, Step); Value *IndVar = Builder.CreateAdd(Span, Start); + if (InScan) + ScanRedInfo->IV = IndVar; return BodyGenCB(Builder.saveIP(), IndVar); }; LocationDescription LoopLoc = diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp index a6e4a63..40d960e 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp @@ -5879,8 +5879,12 @@ AMDGPULegalizerInfo::splitBufferOffsets(MachineIRBuilder &B, const LLT S32 = LLT::scalar(32); MachineRegisterInfo &MRI = *B.getMRI(); - std::tie(BaseReg, ImmOffset) = - AMDGPU::getBaseWithConstantOffset(MRI, OrigOffset); + // On GFX1250+, voffset and immoffset are zero-extended from 32 bits before + // being added, so we can only safely match a 32-bit addition with no unsigned + // overflow. + bool CheckNUW = AMDGPU::isGFX1250(ST); + std::tie(BaseReg, ImmOffset) = AMDGPU::getBaseWithConstantOffset( + MRI, OrigOffset, /*KnownBits=*/nullptr, CheckNUW); // If BaseReg is a pointer, convert it to int. if (MRI.getType(BaseReg).isPointer()) diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp index ff8efd2..0d2feeb 100644 --- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp +++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp @@ -4933,6 +4933,43 @@ bool AMDGPUAsmParser::validateOpSel(const MCInst &Inst) { return false; } + // Packed math FP32 instructions typically accept SGPRs or VGPRs as source + // operands. On gfx12+, if a source operand uses SGPRs, the HW can only read + // the first SGPR and use it for both the low and high operations. 
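Both this assembler check and the SIInstrInfo legality helper shown further down enforce the same rule, and it reduces to one bit test per source: an SGPR source of a packed FP32 instruction is only usable when neither its op_sel nor its op_sel_hi bit is set, because the hardware reads just the first SGPR for both halves. A standalone sketch of the test, with a hypothetical free function; the in-tree code reads the bits from MCInst operands or the src*_modifiers immediates:

#include <cstdint>

// True iff source operand Index (0 = src0, 1 = src1, 2 = src2) has neither
// its op_sel nor its op_sel_hi bit set, i.e. the SGPR value is used
// unswizzled for both the low and high halves of the packed operation.
bool sgprPackedFP32SourceIsLegal(uint32_t OpSel, uint32_t OpSelHi,
                                 unsigned Index) {
  uint32_t Mask = 1u << Index;
  return (OpSel & Mask) == 0 && (OpSelHi & Mask) == 0;
}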
+ if (isPackedFP32Inst(Opc) && isGFX12Plus()) { + int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); + int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); + int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel); + int OpSelHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel_hi); + + const MCOperand &Src0 = Inst.getOperand(Src0Idx); + const MCOperand &Src1 = Inst.getOperand(Src1Idx); + unsigned OpSel = Inst.getOperand(OpSelIdx).getImm(); + unsigned OpSelHi = Inst.getOperand(OpSelHiIdx).getImm(); + + const MCRegisterInfo *TRI = getContext().getRegisterInfo(); + + auto VerifyOneSGPR = [OpSel, OpSelHi](unsigned Index) -> bool { + unsigned Mask = 1U << Index; + return ((OpSel & Mask) == 0) && ((OpSelHi & Mask) == 0); + }; + + if (Src0.isReg() && isSGPR(Src0.getReg(), TRI) && + !VerifyOneSGPR(/*Index=*/0)) + return false; + if (Src1.isReg() && isSGPR(Src1.getReg(), TRI) && + !VerifyOneSGPR(/*Index=*/1)) + return false; + + int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2); + if (Src2Idx != -1) { + const MCOperand &Src2 = Inst.getOperand(Src2Idx); + if (Src2.isReg() && isSGPR(Src2.getReg(), TRI) && + !VerifyOneSGPR(/*Index=*/2)) + return false; + } + } + return true; } diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index 8f44c03..5b327fb 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -6106,6 +6106,7 @@ bool SITargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, case MVT::f64: return true; case MVT::f16: + case MVT::bf16: return Subtarget->has16BitInsts() && !denormalModeIsFlushAllF64F16(MF); default: break; @@ -10877,6 +10878,13 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op, } } +// Return whether the operation has NoUnsignedWrap property. +static bool isNoUnsignedWrap(SDValue Addr) { + return (Addr.getOpcode() == ISD::ADD && + Addr->getFlags().hasNoUnsignedWrap()) || + Addr->getOpcode() == ISD::OR; +} + bool SITargetLowering::shouldPreservePtrArith(const Function &F, EVT PtrVT) const { return UseSelectionDAGPTRADD && PtrVT == MVT::i64; @@ -10898,8 +10906,14 @@ SITargetLowering::splitBufferOffsets(SDValue Offset, SelectionDAG &DAG) const { if ((C1 = dyn_cast<ConstantSDNode>(N0))) N0 = SDValue(); else if (DAG.isBaseWithConstantOffset(N0)) { - C1 = cast<ConstantSDNode>(N0.getOperand(1)); - N0 = N0.getOperand(0); + // On GFX1250+, voffset and immoffset are zero-extended from 32 bits before + // being added, so we can only safely match a 32-bit addition with no + // unsigned overflow. + bool CheckNUW = AMDGPU::isGFX1250(*Subtarget); + if (!CheckNUW || isNoUnsignedWrap(N0)) { + C1 = cast<ConstantSDNode>(N0.getOperand(1)); + N0 = N0.getOperand(0); + } } if (C1) { diff --git a/llvm/lib/Target/AMDGPU/SIInsertHardClauses.cpp b/llvm/lib/Target/AMDGPU/SIInsertHardClauses.cpp index d8fe850..0a68512 100644 --- a/llvm/lib/Target/AMDGPU/SIInsertHardClauses.cpp +++ b/llvm/lib/Target/AMDGPU/SIInsertHardClauses.cpp @@ -51,7 +51,7 @@ static cl::opt<unsigned> namespace { enum HardClauseType { - // For GFX10: + // For GFX10 and GFX1250: // Texture, buffer, global or scratch memory instructions. 
HARDCLAUSE_VMEM, @@ -102,7 +102,8 @@ public: HardClauseType getHardClauseType(const MachineInstr &MI) { if (MI.mayLoad() || (MI.mayStore() && ST->shouldClusterStores())) { - if (ST->getGeneration() == AMDGPUSubtarget::GFX10) { + if (ST->getGeneration() == AMDGPUSubtarget::GFX10 || + ST->hasGFX1250Insts()) { if ((SIInstrInfo::isVMEM(MI) && !SIInstrInfo::isFLAT(MI)) || SIInstrInfo::isSegmentSpecificFLAT(MI)) { if (ST->hasNSAClauseBug()) { @@ -115,7 +116,6 @@ public: if (SIInstrInfo::isFLAT(MI)) return HARDCLAUSE_FLAT; } else { - assert(ST->getGeneration() >= AMDGPUSubtarget::GFX11); if (SIInstrInfo::isMIMG(MI)) { const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode()); const AMDGPU::MIMGBaseOpcodeInfo *BaseInfo = diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp index f20b22d..19e6bcf 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -18,6 +18,7 @@ #include "GCNSubtarget.h" #include "SIMachineFunctionInfo.h" #include "Utils/AMDGPUBaseInfo.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h" #include "llvm/CodeGen/LiveIntervals.h" @@ -5534,6 +5535,15 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, } } + // See SIInstrInfo::isLegalGFX12PlusPackedMathFP32Operand for more + // information. + if (AMDGPU::isPackedFP32Inst(Opcode) && AMDGPU::isGFX12Plus(ST)) { + for (unsigned I = 0; I < 3; ++I) { + if (!isLegalGFX12PlusPackedMathFP32Operand(MRI, MI, I)) + return false; + } + } + return true; } @@ -6005,6 +6015,21 @@ bool SIInstrInfo::isLegalRegOperand(const MachineInstr &MI, unsigned OpIdx, const MCOperandInfo OpInfo = MI.getDesc().operands()[OpIdx]; unsigned Opc = MI.getOpcode(); + // See SIInstrInfo::isLegalGFX12PlusPackedMathFP32Operand for more + // information. 
+ if (AMDGPU::isPackedFP32Inst(MI.getOpcode()) && AMDGPU::isGFX12Plus(ST) && + MO.isReg() && RI.isSGPRReg(MRI, MO.getReg())) { + constexpr const AMDGPU::OpName OpNames[] = { + AMDGPU::OpName::src0, AMDGPU::OpName::src1, AMDGPU::OpName::src2}; + + for (auto [I, OpName] : enumerate(OpNames)) { + int SrcIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpNames[I]); + if (static_cast<unsigned>(SrcIdx) == OpIdx && + !isLegalGFX12PlusPackedMathFP32Operand(MRI, MI, I, &MO)) + return false; + } + } + if (!isLegalRegOperand(MRI, OpInfo, MO)) return false; @@ -6053,6 +6078,39 @@ bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI, return true; } +bool SIInstrInfo::isLegalGFX12PlusPackedMathFP32Operand( + const MachineRegisterInfo &MRI, const MachineInstr &MI, unsigned SrcN, + const MachineOperand *MO) const { + constexpr const unsigned NumOps = 3; + constexpr const AMDGPU::OpName OpNames[NumOps * 2] = { + AMDGPU::OpName::src0, AMDGPU::OpName::src1, + AMDGPU::OpName::src2, AMDGPU::OpName::src0_modifiers, + AMDGPU::OpName::src1_modifiers, AMDGPU::OpName::src2_modifiers}; + + assert(SrcN < NumOps); + + if (!MO) { + int SrcIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpNames[SrcN]); + if (SrcIdx == -1) + return true; + MO = &MI.getOperand(SrcIdx); + } + + if (!MO->isReg() || !RI.isSGPRReg(MRI, MO->getReg())) + return true; + + int ModsIdx = + AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpNames[NumOps + SrcN]); + if (ModsIdx == -1) + return true; + + unsigned Mods = MI.getOperand(ModsIdx).getImm(); + bool OpSel = Mods & SISrcMods::OP_SEL_0; + bool OpSelHi = Mods & SISrcMods::OP_SEL_1; + + return !OpSel && !OpSelHi; +} + bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx, const MachineOperand *MO) const { const MachineFunction &MF = *MI.getParent()->getParent(); @@ -6390,6 +6448,15 @@ void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI, if ((Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_e64) && !RI.isVGPR(MRI, MI.getOperand(VOP3Idx[2]).getReg())) legalizeOpWithMove(MI, VOP3Idx[2]); + + // Fix the register class of packed FP32 instructions on gfx12+. See + // SIInstrInfo::isLegalGFX12PlusPackedMathFP32Operand for more information. + if (AMDGPU::isPackedFP32Inst(Opc) && AMDGPU::isGFX12Plus(ST)) { + for (unsigned I = 0; I < 3; ++I) { + if (!isLegalGFX12PlusPackedMathFP32Operand(MRI, MI, /*SrcN=*/I)) + legalizeOpWithMove(MI, VOP3Idx[I]); + } + } } Register SIInstrInfo::readlaneVGPRToSGPR( diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h index e042b59..6b9403f 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h @@ -1287,6 +1287,19 @@ public: const MachineOperand &MO) const; bool isLegalRegOperand(const MachineInstr &MI, unsigned OpIdx, const MachineOperand &MO) const; + + /// Check if \p MO would be a legal operand for gfx12+ packed math FP32 + /// instructions. Packed math FP32 instructions typically accept SGPRs or + /// VGPRs as source operands. On gfx12+, if a source operand uses SGPRs, the + /// HW can only read the first SGPR and use it for both the low and high + /// operations. + /// \p SrcN can be 0, 1, or 2, representing src0, src1, and src2, + /// respectively. If \p MO is nullptr, the operand corresponding to SrcN will + /// be used. 
+ bool isLegalGFX12PlusPackedMathFP32Operand( + const MachineRegisterInfo &MRI, const MachineInstr &MI, unsigned SrcN, + const MachineOperand *MO = nullptr) const; + /// Legalize operands in \p MI by either commuting it or inserting a /// copy of src1. void legalizeOperandsVOP2(MachineRegisterInfo &MRI, MachineInstr &MI) const; diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp index 00dcb9b..1e3e9a2 100644 --- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp +++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp @@ -3318,6 +3318,20 @@ unsigned getLdsDwGranularity(const MCSubtargetInfo &ST) { return 128; } +bool isPackedFP32Inst(unsigned Opc) { + switch (Opc) { + case AMDGPU::V_PK_ADD_F32: + case AMDGPU::V_PK_ADD_F32_gfx12: + case AMDGPU::V_PK_MUL_F32: + case AMDGPU::V_PK_MUL_F32_gfx12: + case AMDGPU::V_PK_FMA_F32: + case AMDGPU::V_PK_FMA_F32_gfx12: + return true; + default: + return false; + } +} + } // namespace AMDGPU raw_ostream &operator<<(raw_ostream &OS, diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h index 1252e35..1bcd36c 100644 --- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h +++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h @@ -1709,6 +1709,8 @@ bool isArgPassedInSGPR(const Argument *Arg); bool isArgPassedInSGPR(const CallBase *CB, unsigned ArgNo); +LLVM_READONLY bool isPackedFP32Inst(unsigned Opc); + LLVM_READONLY bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST, int64_t EncodedOffset); diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.add.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.add.ll index 62f8f89..79a9291 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.add.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.add.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX8 %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=GFX12 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=GFX12,GFX1200 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=GFX12,GFX1250 %s ; Natural mapping define amdgpu_ps float @raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(i32 %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) { @@ -99,26 +100,47 @@ define amdgpu_ps <2 x float> @raw_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vg ; GFX8-NEXT: $vgpr1 = COPY [[COPY9]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 ; - ; GFX12-LABEL: name: raw_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY 
$sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub1 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY8]] - ; GFX12-NEXT: $vgpr1 = COPY [[COPY9]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; GFX1200-LABEL: name: raw_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub1 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY8]] + ; GFX1200-NEXT: $vgpr1 = COPY [[COPY9]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY 
$vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub1 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY8]] + ; GFX1250-NEXT: $vgpr1 = COPY [[COPY9]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 %ret = call i64 @llvm.amdgcn.raw.buffer.atomic.add.i64(i64 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i64 %ret to <2 x float> ret <2 x float> %cast @@ -142,22 +164,39 @@ define amdgpu_ps void @raw_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgp ; GFX8-NEXT: BUFFER_ATOMIC_ADD_X2_OFFEN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load 
store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i64 @llvm.amdgcn.raw.buffer.atomic.add.i64(i64 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -217,58 +256,111 @@ define amdgpu_ps float @raw_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_vof ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_OFFEN_RTN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]] - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: 
[[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; 
GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; GFX1250-NEXT: 
[[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float ret float %cast @@ -328,57 +420,109 @@ define amdgpu_ps void @raw_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgp ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; 
GFX12-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]] - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_OFFEN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; 
GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_OFFEN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = 
S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ 
$}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_OFFEN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -400,21 +544,40 @@ define amdgpu_ps float @raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_vof ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_OFFEN_RTN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: 
raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec + ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %voffset = add i32 %voffset.base, 4095 %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.cmpswap.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.cmpswap.ll index 364ed62..9f1b7a6 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.cmpswap.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.cmpswap.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=GFX8 %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=GFX12 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck --check-prefix=GFX1200 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck --check-prefix=GFX1250 %s ; Natural mapping @@ -24,24 +25,43 @@ define amdgpu_ps float @raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_ ; GFX8-NEXT: $vgpr0 = COPY [[COPY8]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], 
%subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY8]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY8]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = 
COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY8]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float ret float %cast @@ -66,22 +86,39 @@ define amdgpu_ps void @raw_buffer_atomic_cmpswap_i32_noret__vgpr_val__vgpr_cmp__ ; GFX8-NEXT: BUFFER_ATOMIC_CMPSWAP_OFFEN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; 
GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -145,62 +182,119 @@ define amdgpu_ps float @raw_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_ ; GFX8-NEXT: $vgpr0 = COPY [[COPY15]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec - ; GFX12-NEXT: 
[[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0 - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = COPY [[COPY15]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; 
GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0 + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY15]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], 
%subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0 + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY15]] + ; 
GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float ret float %cast @@ -263,60 +357,115 @@ define amdgpu_ps void @raw_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__ ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, 
implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], 
[[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: 
[[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -341,24 +490,46 @@ define amdgpu_ps float @raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_ ; GFX8-NEXT: $vgpr0 = COPY [[COPY8]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: 
[[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY8]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY8]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY6]], [[COPY8]], 0, implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY 
[[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY9]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %voffset = add i32 %voffset.base, 4095 %ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float @@ -395,33 +566,61 @@ define amdgpu_ps double @raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr ; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 ; - ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub0 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub1 - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec - ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec - ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], 
%subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub0 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub1 + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec + ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec + ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub0 + ; GFX1250-NEXT: 
[[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub1 + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec + ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec + ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 %ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i64 %ret to double ret double %cast @@ -450,26 +649,47 @@ define amdgpu_ps void @raw_buffer_atomic_cmpswap_i64_noret__vgpr_val__vgpr_cmp__ ; GFX8-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_OFFEN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 - ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; 
GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -542,71 +762,137 @@ define amdgpu_ps double @raw_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr ; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 ; - ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = 
COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]] - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]] - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 - ; GFX12-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 - ; GFX12-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY17:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1 - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; 
GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: [[COPY18:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub0 - ; GFX12-NEXT: [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub1 - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY18]], implicit $exec - ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]] - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY19]], implicit $exec - ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]] + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]] + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 + ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec + ; GFX1200-NEXT: 
[[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY17:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1 + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: [[COPY18:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub0 + ; GFX1200-NEXT: [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub1 + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY18]], implicit $exec + ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]] + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY19]], implicit $exec + ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, 
[[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]] + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]] + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 + ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY17:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1 + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term 
[[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: [[COPY18:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub0 + ; GFX1250-NEXT: [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub1 + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY18]], implicit $exec + ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]] + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY19]], implicit $exec + ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 %ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i64 %ret to double ret double %cast @@ -673,64 +959,123 @@ define amdgpu_ps void @raw_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__ ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]] - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]] - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 - ; GFX12-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 - ; GFX12-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY 
[[REG_SEQUENCE3]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3 - ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]] + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]] + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; 
GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 + ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3 + ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: 
[[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]] + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]] + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 + ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3 + ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, 
[[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -764,33 +1109,64 @@ define amdgpu_ps double @raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr ; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 ; - ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub0 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub1 - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec - ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec - ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; 
GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub0 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub1 + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec + ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec + ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; + ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY8]], [[COPY10]], 0, implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1250-NEXT: 
[[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE2]], [[COPY9]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1 + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec + ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec + ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 %voffset = add i32 %voffset.base, 4095 %ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i64 %ret to double diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.ll index 46ca43b..7003bb1 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX8 %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12,GFX1200 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12,GFX1250 %s ; FIXME: Test with SI when argument lowering not broken for f16 ; Natural mapping @@ -124,52 +125,99 @@ define amdgpu_ps float @raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffs ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = 
V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: 
[[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = 
REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret float %val } @@ -226,55 +274,105 @@ define amdgpu_ps float @raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffs ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: 
[[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY5]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: 
[[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + 
; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret float %val } @@ -509,23 +607,41 @@ define amdgpu_ps <2 x float> @raw_buffer_load_v2f32__sgpr_rsrc__vgpr_voffset__sg ; GFX8-NEXT: $vgpr1 = COPY [[COPY7]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 ; - ; GFX12-LABEL: name: raw_buffer_load_v2f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; 
GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY6]] - ; GFX12-NEXT: $vgpr1 = COPY [[COPY7]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; GFX1200-LABEL: name: raw_buffer_load_v2f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY6]] + ; GFX1200-NEXT: $vgpr1 = COPY [[COPY7]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v2f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY6]] + ; GFX1250-NEXT: $vgpr1 = COPY [[COPY7]] + 
; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 %val = call <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret <2 x float> %val } @@ -551,25 +667,45 @@ define amdgpu_ps <3 x float> @raw_buffer_load_v3f32__sgpr_rsrc__vgpr_voffset__sg ; GFX8-NEXT: $vgpr2 = COPY [[COPY8]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2 ; - ; GFX12-LABEL: name: raw_buffer_load_v3f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub0 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub1 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub2 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY6]] - ; GFX12-NEXT: $vgpr1 = COPY [[COPY7]] - ; GFX12-NEXT: $vgpr2 = COPY [[COPY8]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2 + ; GFX1200-LABEL: name: raw_buffer_load_v3f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub0 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub1 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub2 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY6]] + ; GFX1200-NEXT: $vgpr1 = COPY [[COPY7]] + ; GFX1200-NEXT: $vgpr2 = COPY [[COPY8]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v3f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: 
[[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub0 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub1 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub2 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY6]] + ; GFX1250-NEXT: $vgpr1 = COPY [[COPY7]] + ; GFX1250-NEXT: $vgpr2 = COPY [[COPY8]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2 %val = call <3 x float> @llvm.amdgcn.raw.buffer.load.v3f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret <3 x float> %val } @@ -597,27 +733,49 @@ define amdgpu_ps <4 x float> @raw_buffer_load_v4f32__sgpr_rsrc__vgpr_voffset__sg ; GFX8-NEXT: $vgpr3 = COPY [[COPY9]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 ; - ; GFX12-LABEL: name: raw_buffer_load_v4f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub0 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub1 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub2 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub3 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY6]] - ; GFX12-NEXT: $vgpr1 = COPY [[COPY7]] - ; GFX12-NEXT: $vgpr2 = COPY [[COPY8]] - ; GFX12-NEXT: $vgpr3 = COPY [[COPY9]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; GFX1200-LABEL: name: raw_buffer_load_v4f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = 
COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub0 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub1 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub2 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub3 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY6]] + ; GFX1200-NEXT: $vgpr1 = COPY [[COPY7]] + ; GFX1200-NEXT: $vgpr2 = COPY [[COPY8]] + ; GFX1200-NEXT: $vgpr3 = COPY [[COPY9]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v4f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub0 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub1 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub2 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub3 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY6]] + ; GFX1250-NEXT: $vgpr1 = COPY [[COPY7]] + ; GFX1250-NEXT: $vgpr2 = COPY [[COPY8]] + ; GFX1250-NEXT: $vgpr3 = COPY [[COPY9]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 %val = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret <4 x float> %val } @@ -715,23 +873,41 @@ define amdgpu_ps <4 x half> @raw_buffer_load_v4f16__sgpr_rsrc__vgpr_voffset__sgp ; GFX8-NEXT: $vgpr1 = COPY [[COPY7]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 ; - ; GFX12-LABEL: name: raw_buffer_load_v4f16__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: 
[[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY6]] - ; GFX12-NEXT: $vgpr1 = COPY [[COPY7]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; GFX1200-LABEL: name: raw_buffer_load_v4f16__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY6]] + ; GFX1200-NEXT: $vgpr1 = COPY [[COPY7]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v4f16__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY6]] + ; GFX1250-NEXT: $vgpr1 = COPY [[COPY7]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 
%val = call <4 x half> @llvm.amdgcn.raw.buffer.load.v4f16(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret <4 x half> %val } @@ -929,52 +1105,99 @@ define amdgpu_ps half @raw_buffer_load_f16__vgpr_rsrc__vgpr_voffset__sgpr_soffse ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_OFFEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_load_f16__vgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = 
COPY [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_load_f16__vgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: 
raw_buffer_load_f16__vgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call half @llvm.amdgcn.raw.buffer.load.f16(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret half %val } @@ -1028,52 +1251,99 @@ define amdgpu_ps float 
@raw_buffer_load_i8__vgpr_rsrc__vgpr_voffset__sgpr_soffse ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_OFFEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_load_i8__vgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_load_i8__vgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 
(%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_i8__vgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; 
GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call i8 @llvm.amdgcn.raw.buffer.load.i8(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) %zext = zext i8 %val to i32 %cast = bitcast i32 %zext to float @@ -1194,20 +1464,38 @@ define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffs ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: 
raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add16 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 16, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add16 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 16, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add16 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY4]], [[COPY6]], 0, implicit $exec + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %voffset = add i32 %voffset.base, 16 %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x 
i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret float %val @@ -1229,20 +1517,38 @@ define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffs ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 4095, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 4095, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY4]], [[COPY6]], 0, implicit $exec + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[V_ADD_U32_e64_]], 
[[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %voffset = add i32 %voffset.base, 4095 %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret float %val @@ -1267,20 +1573,38 @@ define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffs ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4096 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 4096, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4096 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 4096, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4096 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = 
S_MOV_B32 4096 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY4]], [[COPY6]], 0, implicit $exec + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %voffset = add i32 %voffset.base, 4096 %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret float %val @@ -1522,54 +1846,103 @@ define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffs ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add5000 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000 - ; GFX12-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY5]], [[S_MOV_B32_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, 
implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[S_ADD_I32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_1]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add5000 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000 + ; GFX1200-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY5]], [[S_MOV_B32_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def 
$scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[S_ADD_I32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_1]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add5000 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000 + ; GFX1250-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY5]], [[S_MOV_B32_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = 
S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[S_ADD_I32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_1]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %soffset = add i32 %soffset.base, 5000 %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret float %val @@ -1627,52 +2000,102 @@ define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffs ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add5000 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec - ; GFX12-NEXT: 
[[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 5000, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add5000 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: 
[[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 5000, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add5000 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY4]], [[COPY6]], 0, implicit $exec + ; GFX1250-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec + ; GFX1250-NEXT: 
[[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[V_ADD_U32_e64_]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_1]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %voffset = add i32 %voffset.base, 5000 %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret float %val diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll index 3fbfb63..4784ac5 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll @@ -5,7 +5,8 @@ ; RUN: llc -global-isel -mcpu=gfx900 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefix=GFX910 ; RUN: llc -global-isel -mcpu=gfx1010 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefix=GFX910 ; RUN: llc -global-isel -mcpu=gfx1100 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX11 -; RUN: llc -global-isel -mcpu=gfx1200 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX12 +; RUN: llc -global-isel -mcpu=gfx1200 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX1200 +; RUN: llc -global-isel -mcpu=gfx1250 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX1250 define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) { ; GFX67-LABEL: name: raw_buffer_load_i8_tfe @@ -110,27 +111,49 @@ define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspa ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_i8_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; 
GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub1 - ; GFX12-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_i8_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1200-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_i8_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY 
$vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1250-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { i8, i32 } @llvm.amdgcn.raw.buffer.load.sl_i8i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0) %data = extractvalue { i8, i32 } %res, 0 store i8 %data, ptr addrspace(1) %data_addr @@ -242,27 +265,49 @@ define amdgpu_ps void @raw_buffer_load_i16_tfe(<4 x i32> inreg %rsrc, ptr addrsp ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_i16_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1 - ; GFX12-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_i16_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: 
[[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1200-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_i16_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1250-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { i16, i32 } @llvm.amdgcn.raw.buffer.load.sl_i16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0) %data = extractvalue { i16, i32 } %res, 0 store i16 %data, ptr addrspace(1) 
%data_addr @@ -374,27 +419,49 @@ define amdgpu_ps void @raw_buffer_load_f16_tfe(<4 x i32> inreg %rsrc, ptr addrsp ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_f16_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1 - ; GFX12-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_f16_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1200-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 
0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_f16_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1250-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { half, i32 } @llvm.amdgcn.raw.buffer.load.sl_f16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0) %data = extractvalue { half, i32 } %res, 0 store half %data, ptr addrspace(1) %data_addr @@ -506,27 +573,49 @@ define amdgpu_ps void @raw_buffer_load_i32_tfe(<4 x i32> inreg %rsrc, ptr addrsp ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_i32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: 
[[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub1 - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_i32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_i32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; 
GFX1250-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { i32, i32 } @llvm.amdgcn.raw.buffer.load.sl_i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0) %data = extractvalue { i32, i32 } %res, 0 store i32 %data, ptr addrspace(1) %data_addr @@ -646,29 +735,53 @@ define amdgpu_ps void @raw_buffer_load_v2i32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v2i32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v2i32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: 
[[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v2i32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: 
GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <2 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v2i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0) %data = extractvalue { <2 x i32>, i32 } %res, 0 store <2 x i32> %data, ptr addrspace(1) %data_addr @@ -788,29 +901,53 @@ define amdgpu_ps void @raw_buffer_load_v2f32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v2f32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v2f32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; 
GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v2f32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <2 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v2f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0) %data = extractvalue { <2 x float>, i32 } %res, 0 store <2 x float> %data, ptr addrspace(1) %data_addr @@ -977,30 +1114,55 @@ define amdgpu_ps void @raw_buffer_load_v3i32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) 
into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v3i32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v3i32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY 
[[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v3i32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <3 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v3i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0) %data = extractvalue { <3 x i32>, i32 } %res, 0 store <3 x i32> %data, ptr addrspace(1) %data_addr @@ -1167,30 +1329,55 @@ define amdgpu_ps void @raw_buffer_load_v3f32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v3f32_tfe - ; GFX12: bb.1 
(%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v3f32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1 + ; 
GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v3f32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <3 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v3f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0) %data = extractvalue { <3 x float>, i32 } %res, 0 store <3 x float> %data, ptr addrspace(1) %data_addr @@ -1318,31 +1505,57 @@ define amdgpu_ps void @raw_buffer_load_v4i32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v4i32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: 
[[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v4i32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1200-NEXT: 
[[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v4i32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160_align2 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <4 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v4i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0) %data = extractvalue { <4 x i32>, i32 } %res, 0 store <4 x i32> %data, ptr addrspace(1) %data_addr @@ -1470,31 +1683,57 @@ define amdgpu_ps void @raw_buffer_load_v4f32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; 
- ; GFX12-LABEL: name: raw_buffer_load_v4f32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v4f32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: 
[[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v4f32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160_align2 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <4 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v4f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0) %data = extractvalue { <4 x float>, i32 } %res, 0 store <4 x float> %data, ptr addrspace(1) %data_addr diff --git 
a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll
index 63ca7be..c365d57 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX8 %s
-; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12,GFX1200 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12,GFX1250 %s
 ; FIXME: Test with SI when argument lowering not broken for f16
 
 ; Natural mapping
@@ -126,52 +127,99 @@ define amdgpu_ps void @raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
 ; GFX8-NEXT: bb.5:
 ; GFX8-NEXT: S_ENDPGM 0
 ;
- ; GFX12-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]],
[[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; 
GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], 
[[REG_SEQUENCE1]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -309,55 +357,105 @@ define amdgpu_ps void @raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__vgpr ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: 
[[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit 
$exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: 
[[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -618,22 +716,39 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: BUFFER_STORE_DWORDX2_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f32 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f32 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], 
%subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f32 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.raw.buffer.store.v2f32(<2 x float> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -657,23 +772,41 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: BUFFER_STORE_DWORDX3_OFFEN_exact [[REG_SEQUENCE1]], [[COPY7]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v3f32 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY7]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; 
GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v3f32 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY7]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v3f32 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY7]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.raw.buffer.store.v3f32(<3 x float> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -698,24 +831,43 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: BUFFER_STORE_DWORDX4_OFFEN_exact [[REG_SEQUENCE1]], [[COPY8]], [[REG_SEQUENCE]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f32 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - 
; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY8]], [[REG_SEQUENCE]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f32 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY8]], [[REG_SEQUENCE]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f32 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], 
[[COPY8]], [[REG_SEQUENCE]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.raw.buffer.store.v4f32(<4 x float> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -876,22 +1028,39 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: BUFFER_STORE_DWORDX2_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = 
REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.raw.buffer.store.v4f16(<4 x half> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -946,54 +1115,103 @@ define amdgpu_ps void @raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: 
[[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE2]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: 
[[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE2]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead 
$scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE2]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.raw.buffer.store.v4f16(<4 x half> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -1080,20 +1298,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_16 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_16 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_16 + ; GFX1250: bb.1 (%ir-block.0): 
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %voffset.add = add i32 %voffset, 16 call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0) ret void @@ -1115,20 +1351,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4095 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4095 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 
0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4095 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %voffset.add = add i32 %voffset, 4095 call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0) ret void @@ -1153,20 +1407,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[V_ADD_CO_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4096 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4096, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4096 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: 
[[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4096, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4096 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %voffset.add = add i32 %voffset, 4096 call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0) ret void @@ -1256,20 +1528,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_16 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_16 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: 
[[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_16 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %voffset.add = add i32 %voffset, 16 call void @llvm.amdgcn.raw.buffer.store.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0) ret void @@ -1291,20 +1581,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4095 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: 
raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4095 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4095 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %voffset.add = add i32 %voffset, 4095 call void @llvm.amdgcn.raw.buffer.store.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0) ret void @@ -1329,20 +1637,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[V_ADD_CO_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4096 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY 
$vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4096, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4096 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4096, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4096 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %voffset.add = add i32 %voffset, 4096 call void @llvm.amdgcn.raw.buffer.store.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0) ret void @@ -1400,52 +1726,102 @@ define amdgpu_ps void @raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_offset_add_5000 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, 
[[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[COPY6]], 5000, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_offset_add_5000 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: 
[[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[COPY6]], 5000, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_offset_add_5000 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 
[[COPY5]], [[COPY7]], 0, implicit $exec + ; GFX1250-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE1]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_1]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 %voffset.add = add i32 %voffset, 5000 call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0) ret void @@ -1501,51 +1877,97 @@ define amdgpu_ps void @raw_buffer_store__vgpr_rsrc__vgpr_val__5000_voffset__sgpr ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__5000_voffset__sgpr_soffset_offset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: 
[[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 5000, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__5000_voffset__sgpr_soffset_offset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: 
[[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 5000, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__5000_voffset__sgpr_soffset_offset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = 
V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 5000, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 5000, i32 %soffset, i32 0) ret void } diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.add.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.add.ll index 75d6c59..484639a 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.add.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.add.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX8 %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1200 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1250 %s ; Natural mapping define amdgpu_ps float @struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(i32 %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) { @@ -22,23 +23,41 @@ define amdgpu_ps float @struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_ ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_BOTHEN_RTN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; 
GFX12-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY 
$vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %ret = call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float ret float %cast @@ -63,23 +82,41 @@ define amdgpu_ps float @struct_buffer_atomic_add_i32_noret__vgpr_val__sgpr_rsrc_ ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_BOTHEN_RTN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_add_i32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_atomic_add_i32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable 
load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_add_i32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %ret = call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float ret float %cast @@ -109,28 +146,51 @@ define amdgpu_ps <2 x float> @struct_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc_ ; GFX8-NEXT: $vgpr1 = COPY [[COPY10]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 ; - ; GFX12-LABEL: name: struct_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY 
[[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub1 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY9]] - ; GFX12-NEXT: $vgpr1 = COPY [[COPY10]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; GFX1200-LABEL: name: struct_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub1 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY9]] + ; GFX1200-NEXT: $vgpr1 = COPY [[COPY10]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY 
[[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub1 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY9]] + ; GFX1250-NEXT: $vgpr1 = COPY [[COPY10]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 %ret = call i64 @llvm.amdgcn.struct.buffer.atomic.add.i64(i64 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i64 %ret to <2 x float> ret <2 x float> %cast @@ -156,24 +216,43 @@ define amdgpu_ps void @struct_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__ ; GFX8-NEXT: BUFFER_ATOMIC_ADD_X2_BOTHEN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (volatile 
dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i64 @llvm.amdgcn.struct.buffer.atomic.add.i64(i64 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -236,61 +315,117 @@ define amdgpu_ps float @struct_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_ ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_BOTHEN_RTN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = 
V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1200-NEXT: 
[[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, 
$vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, 
addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %ret = call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float ret float %cast @@ -353,60 +488,115 @@ define amdgpu_ps void @struct_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__ ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 
= V_READFIRSTLANE_B32 [[COPY7]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY 
[[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 
= V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -431,23 +621,41 @@ define amdgpu_ps float @struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_ ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_BOTHEN_RTN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], 
%subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 3, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 3, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 3, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %ret = call i32 
@llvm.amdgcn.struct.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2) %cast = bitcast i32 %ret to float ret float %cast diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.cmpswap.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.cmpswap.ll index c9d1227..7dab257 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.cmpswap.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.cmpswap.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX8 %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1200 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1250 %s ; Natural mapping define amdgpu_ps float @struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset(i32 %val, i32 %cmp, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) { @@ -25,26 +26,47 @@ define amdgpu_ps float @struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sg ; GFX8-NEXT: $vgpr0 = COPY [[COPY9]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY9]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: 
[[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY9]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY9]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %ret = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float ret float %cast @@ -71,24 +93,43 @@ define amdgpu_ps void @struct_buffer_atomic_cmpswap_noret_i32__vgpr_val__vgpr_cm ; GFX8-NEXT: BUFFER_ATOMIC_CMPSWAP_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, 
addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_noret_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_noret_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_noret_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: 
[[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -155,65 +196,125 @@ define amdgpu_ps float @struct_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vg ; GFX8-NEXT: $vgpr0 = COPY [[COPY17]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]] - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; 
GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0 - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = COPY [[COPY17]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: 
%bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0 + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY17]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: 
{{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1250-NEXT: 
[[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0 + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY17]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %ret = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float ret float %cast @@ -279,63 +380,121 @@ define amdgpu_ps void @struct_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cm ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]] - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: 
[[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; 
GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: 
[[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]] + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]] + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]] + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; 
GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -362,26 +521,50 @@ define amdgpu_ps float @struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sg ; GFX8-NEXT: $vgpr0 = COPY [[COPY9]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY9]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], 
[[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY9]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY7]], [[COPY9]], 0, implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[V_ADD_U32_e64_]], %subreg.sub1 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY10]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %voffset = add i32 %voffset.base, 4095 %ret = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i32 %ret to float @@ -420,35 +603,65 @@ define amdgpu_ps double @struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__s ; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 ; - ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: 
[[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 - ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1 - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec - ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec - ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: 
[[COPY11:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1 + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec + ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec + ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1 + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec + ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec + ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 %ret = call i64 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i64 %ret to double ret double %cast @@ -479,28 +692,51 @@ define amdgpu_ps void @struct_buffer_atomic_cmpswap_noret_i64__vgpr_val__vgpr_cm ; GFX8-NEXT: 
BUFFER_ATOMIC_CMPSWAP_X2_BOTHEN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_noret_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 - ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 - ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_noret_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], 
%subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_noret_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i64 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -576,74 +812,143 @@ define amdgpu_ps double @struct_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__v ; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 ; - ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; 
GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]] - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]] - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 - ; GFX12-NEXT: [[COPY16:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 - ; GFX12-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 - ; GFX12-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1 - ; GFX12-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY19:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1 - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], 
implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: [[COPY20:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub0 - ; GFX12-NEXT: [[COPY21:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub1 - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY20]], implicit $exec - ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]] - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY21]], implicit $exec - ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]] + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]] + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; GFX1200-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY 
[[REG_SEQUENCE3]].sub0_sub1 + ; GFX1200-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1 + ; GFX1200-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY19:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1 + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: [[COPY20:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub0 + ; GFX1200-NEXT: [[COPY21:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub1 + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY20]], implicit $exec + ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]] + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY21]], implicit $exec + ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], 
%subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]] + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]] + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; GFX1250-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 + ; GFX1250-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1 + ; GFX1250-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128_align2 = 
BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY19:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1 + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: [[COPY20:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub0 + ; GFX1250-NEXT: [[COPY21:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub1 + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY20]], implicit $exec + ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]] + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY21]], implicit $exec + ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 %ret = call i64 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i64 %ret to double ret double %cast @@ -713,67 +1018,129 @@ define amdgpu_ps void @struct_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cm ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]] - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]] - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: 
[[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 - ; GFX12-NEXT: [[COPY16:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 - ; GFX12-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 - ; GFX12-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1 - ; GFX12-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3 - ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], 
%subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]] + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]] + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; GFX1200-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 + ; GFX1200-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1 + ; GFX1200-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3 + ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, 
implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]] + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]] + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; GFX1250-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1 + ; GFX1250-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec + ; GFX1250-NEXT: 
[[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1 + ; GFX1250-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3 + ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 %ret = call i64 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -809,35 +1176,68 @@ define amdgpu_ps double @struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__s ; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 ; - ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: 
[[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 - ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 - ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1 - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec - ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec - ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1 + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec + ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = 
V_READFIRSTLANE_B32 [[COPY13]], implicit $exec + ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 + ; + ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY9]], [[COPY11]], 0, implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[V_ADD_U32_e64_]], %subreg.sub1 + ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3 + ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY12]].sub0 + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY12]].sub1 + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec + ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]] + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY14]], implicit $exec + ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1 %voffset = add i32 %voffset.base, 4095 %ret = call i64 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %cast = bitcast i64 %ret to double diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll index 9b5e46b3..dbef90f 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll +++ 
b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX8 %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1200 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1250 %s ; Natural mapping define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) { @@ -21,22 +22,39 @@ define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_vof ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: 
$vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret float %val } @@ -63,25 +81,45 @@ define amdgpu_ps <2 x float> @struct_buffer_load_v2f32__sgpr_rsrc__vgpr_vindex__ ; GFX8-NEXT: $vgpr1 = COPY [[COPY8]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 ; - ; GFX12-LABEL: name: struct_buffer_load_v2f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY7]] - ; GFX12-NEXT: $vgpr1 = COPY [[COPY8]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; GFX1200-LABEL: name: struct_buffer_load_v2f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: 
[[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY7]] + ; GFX1200-NEXT: $vgpr1 = COPY [[COPY8]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; + ; GFX1250-LABEL: name: struct_buffer_load_v2f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY7]] + ; GFX1250-NEXT: $vgpr1 = COPY [[COPY8]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 %val = call <2 x float> @llvm.amdgcn.struct.buffer.load.v2f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret <2 x float> %val } @@ -110,27 +148,49 @@ define amdgpu_ps <3 x float> @struct_buffer_load_v3f32__sgpr_rsrc__vgpr_vindex__ ; GFX8-NEXT: $vgpr2 = COPY [[COPY9]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2 ; - ; GFX12-LABEL: name: struct_buffer_load_v3f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = 
COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub1 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub2 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY7]] - ; GFX12-NEXT: $vgpr1 = COPY [[COPY8]] - ; GFX12-NEXT: $vgpr2 = COPY [[COPY9]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2 + ; GFX1200-LABEL: name: struct_buffer_load_v3f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub1 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub2 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY7]] + ; GFX1200-NEXT: $vgpr1 = COPY [[COPY8]] + ; GFX1200-NEXT: $vgpr2 = COPY [[COPY9]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2 + ; + ; GFX1250-LABEL: name: struct_buffer_load_v3f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; 
GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub1 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub2 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY7]] + ; GFX1250-NEXT: $vgpr1 = COPY [[COPY8]] + ; GFX1250-NEXT: $vgpr2 = COPY [[COPY9]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2 %val = call <3 x float> @llvm.amdgcn.struct.buffer.load.v3f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret <3 x float> %val } @@ -161,29 +221,53 @@ define amdgpu_ps <4 x float> @struct_buffer_load_v4f32__sgpr_rsrc__vgpr_vindex__ ; GFX8-NEXT: $vgpr3 = COPY [[COPY10]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 ; - ; GFX12-LABEL: name: struct_buffer_load_v4f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub1 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub2 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub3 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY7]] - ; GFX12-NEXT: $vgpr1 = COPY [[COPY8]] - ; GFX12-NEXT: $vgpr2 = COPY [[COPY9]] - ; GFX12-NEXT: $vgpr3 = COPY [[COPY10]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; GFX1200-LABEL: name: struct_buffer_load_v4f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: 
[[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub1 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub2 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub3 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY7]] + ; GFX1200-NEXT: $vgpr1 = COPY [[COPY8]] + ; GFX1200-NEXT: $vgpr2 = COPY [[COPY9]] + ; GFX1200-NEXT: $vgpr3 = COPY [[COPY10]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; + ; GFX1250-LABEL: name: struct_buffer_load_v4f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub1 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub2 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub3 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY7]] + ; GFX1250-NEXT: $vgpr1 = COPY [[COPY8]] + ; GFX1250-NEXT: $vgpr2 = COPY [[COPY9]] + ; GFX1250-NEXT: $vgpr3 = COPY [[COPY10]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 %val = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret <4 x float> %val } @@ -208,23 +292,41 @@ define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_vof ; 
GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_vindex0 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY4]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_vindex0 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY4]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_vindex0 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: 
[[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY4]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 0, i32 %voffset, i32 %soffset, i32 0) ret float %val } @@ -248,22 +350,42 @@ define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_vof ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_voffset_add4095 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_voffset_add4095 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: 
struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_voffset_add4095 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[V_ADD_U32_e64_]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %voffset = add i32 %voffset.base, 4095 %val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret float %val @@ -287,22 +409,39 @@ define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_vof ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_soffset_64 - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 64 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_soffset_64 + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: 
[[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 64 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_soffset_64 + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 64 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 64, i32 0) ret float %val } @@ -363,59 +502,113 @@ define amdgpu_ps float @struct_buffer_load_f32__vgpr_rsrc__sgpr_vindex__sgpr_vof ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_f32__vgpr_rsrc__sgpr_vindex__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY4]] - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] - ; GFX12-NEXT: 
[[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_f32__vgpr_rsrc__sgpr_vindex__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: 
[[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY4]] + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: $vgpr0 = 
COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_f32__vgpr_rsrc__sgpr_vindex__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY4]] + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1 + ; GFX1250-NEXT: 
[[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret float %val } @@ -438,22 +631,39 @@ define amdgpu_ps float @struct_buffer_load_i8_zext__sgpr_rsrc__vgpr_vindex__vgpr ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_i8_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_i8_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]] + ; 
GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_i8_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call i8 @llvm.amdgcn.struct.buffer.load.i8(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %ext = zext i8 %val to i32 %cast = bitcast i32 %ext to float @@ -478,22 +688,39 @@ define amdgpu_ps float @struct_buffer_load_i8_sext__sgpr_rsrc__vgpr_vindex__vgpr ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SBYTE_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_i8_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_i8_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], 
%subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_i8_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call i8 @llvm.amdgcn.struct.buffer.load.i8(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %ext = sext i8 %val to i32 %cast = bitcast i32 %ext to float @@ -519,23 +746,41 @@ define amdgpu_ps float @struct_buffer_load_i8_sext_wrong_width(<4 x i32> inreg % ; GFX8-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_i8_sext_wrong_width - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) - ; GFX12-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]], 0, 4, 
implicit $exec - ; GFX12-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_i8_sext_wrong_width + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1200-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]], 0, 4, implicit $exec + ; GFX1200-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_i8_sext_wrong_width + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1250-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]], 0, 4, implicit $exec + ; GFX1250-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call i8 @llvm.amdgcn.struct.buffer.load.i8(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %trunc = trunc i8 %val to i4 %ext = sext i4 %trunc to i32 @@ -561,22 +806,39 @@ define amdgpu_ps float @struct_buffer_load_i16_zext__sgpr_rsrc__vgpr_vindex__vgp ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_i16_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; 
GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_i16_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_i16_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call i16 @llvm.amdgcn.struct.buffer.load.i16(<4 x 
i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %ext = zext i16 %val to i32 %cast = bitcast i32 %ext to float @@ -601,22 +863,39 @@ define amdgpu_ps float @struct_buffer_load_i16_sext__sgpr_rsrc__vgpr_vindex__vgp ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SSHORT_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_i16_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_i16_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_i16_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: 
[[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call i16 @llvm.amdgcn.struct.buffer.load.i16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %ext = sext i16 %val to i32 %cast = bitcast i32 %ext to float @@ -642,23 +921,41 @@ define amdgpu_ps float @struct_buffer_load_i16_sext_wrong_width(<4 x i32> inreg ; GFX8-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_i16_sext_wrong_width - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) - ; GFX12-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]], 0, 8, implicit $exec - ; GFX12-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_i16_sext_wrong_width + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1200-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 
[[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]], 0, 8, implicit $exec + ; GFX1200-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_i16_sext_wrong_width + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1250-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]], 0, 8, implicit $exec + ; GFX1250-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call i16 @llvm.amdgcn.struct.buffer.load.i16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) %trunc = trunc i16 %val to i8 %ext = sext i8 %trunc to i32 @@ -685,22 +982,39 @@ define amdgpu_ps half @struct_buffer_load_f16__sgpr_rsrc__vgpr_vindex__vgpr_voff ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: 
[[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call half @llvm.amdgcn.struct.buffer.load.f16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret half %val } @@ -724,22 +1038,39 @@ define amdgpu_ps <2 x half> @struct_buffer_load_v2f16__sgpr_rsrc__vgpr_vindex__v ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_v2f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = 
BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s16>), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_v2f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s16>), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_v2f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s16>), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call <2 x half> @llvm.amdgcn.struct.buffer.load.v2f16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret <2 x half> %val } @@ -772,25 +1103,45 @@ define amdgpu_ps <4 x half> @struct_buffer_load_v4f16__sgpr_rsrc__vgpr_vindex__v ; GFX8-NEXT: $vgpr1 = COPY [[COPY8]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 ; - ; GFX12-LABEL: name: struct_buffer_load_v4f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: 
[[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1 - ; GFX12-NEXT: $vgpr0 = COPY [[COPY7]] - ; GFX12-NEXT: $vgpr1 = COPY [[COPY8]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; GFX1200-LABEL: name: struct_buffer_load_v4f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1 + ; GFX1200-NEXT: $vgpr0 = COPY [[COPY7]] + ; GFX1200-NEXT: $vgpr1 = COPY [[COPY8]] + ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; + ; GFX1250-LABEL: name: struct_buffer_load_v4f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: 
[[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1 + ; GFX1250-NEXT: $vgpr0 = COPY [[COPY7]] + ; GFX1250-NEXT: $vgpr1 = COPY [[COPY8]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 %val = call <4 x half> @llvm.amdgcn.struct.buffer.load.v4f16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret <4 x half> %val } @@ -814,22 +1165,39 @@ define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_vof ; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]] ; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 ; - ; GFX12-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 1, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] - ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX1200-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 1, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; 
GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; + ; GFX1250-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 1, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]] + ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0 %val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 1) ret float %val } diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll index 674fe1c..39cce20 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll @@ -5,7 +5,8 @@ ; RUN: llc -global-isel -mcpu=gfx900 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefix=GFX910 ; RUN: llc -global-isel -mcpu=gfx1010 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefix=GFX910 ; RUN: llc -global-isel -mcpu=gfx1100 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX11 -; RUN: llc -global-isel -mcpu=gfx1200 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX12 +; RUN: llc -global-isel -mcpu=gfx1200 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX1200 +; RUN: llc -global-isel -mcpu=gfx1250 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX1250 define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) { ; GFX67-LABEL: name: raw_buffer_load_i8_tfe @@ -114,29 +115,53 @@ define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspa ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_i8_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub1 - ; GFX12-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_i8_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1200-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_i8_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; 
GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1250-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { i8, i32 } @llvm.amdgcn.struct.buffer.load.sl_i8i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0) %data = extractvalue { i8, i32 } %res, 0 store i8 %data, ptr addrspace(1) %data_addr @@ -252,29 +277,53 @@ define amdgpu_ps void @raw_buffer_load_i16_tfe(<4 x i32> inreg %rsrc, ptr addrsp ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_i16_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, 
addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1 - ; GFX12-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_i16_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1200-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_i16_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 
0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1250-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { i16, i32 } @llvm.amdgcn.struct.buffer.load.sl_i16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0) %data = extractvalue { i16, i32 } %res, 0 store i16 %data, ptr addrspace(1) %data_addr @@ -390,29 +439,53 @@ define amdgpu_ps void @raw_buffer_load_f16_tfe(<4 x i32> inreg %rsrc, ptr addrsp ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_f16_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1 - ; GFX12-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_f16_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 
= COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1200-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_f16_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1250-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { half, i32 } @llvm.amdgcn.struct.buffer.load.sl_f16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, 
i32 0) %data = extractvalue { half, i32 } %res, 0 store half %data, ptr addrspace(1) %data_addr @@ -528,29 +601,53 @@ define amdgpu_ps void @raw_buffer_load_i32_tfe(<4 x i32> inreg %rsrc, ptr addrsp ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_i32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub1 - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_i32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], 
$sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_i32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { i32, i32 } @llvm.amdgcn.struct.buffer.load.sl_i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0) %data = extractvalue { i32, i32 } %res, 0 store i32 %data, ptr addrspace(1) %data_addr @@ -674,31 +771,57 @@ define amdgpu_ps void @raw_buffer_load_v2i32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v2i32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], 
%subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v2i32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) + ; 
GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v2i32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <2 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v2i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0) %data = extractvalue { <2 x i32>, i32 } %res, 0 store <2 x i32> %data, ptr addrspace(1) %data_addr @@ -822,31 +945,57 @@ define amdgpu_ps void @raw_buffer_load_v2f32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v2f32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: 
[[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v2f32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; 
GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v2f32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <2 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v2f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0) %data = extractvalue { <2 x float>, i32 } %res, 0 store <2 x float> %data, ptr addrspace(1) %data_addr @@ -1018,32 +1167,59 @@ define amdgpu_ps void @raw_buffer_load_v3i32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v3i32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 
= COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v3i32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) + ; GFX1200-NEXT: 
GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v3i32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <3 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v3i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0) %data = extractvalue { <3 x i32>, i32 } %res, 0 store <3 x i32> %data, ptr addrspace(1) %data_addr @@ -1215,32 +1391,59 @@ define amdgpu_ps void @raw_buffer_load_v3f32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v3f32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: 
[[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v3f32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], 
%subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v3f32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <3 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v3f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0) %data = extractvalue { <3 x float>, i32 } %res, 0 store <3 x float> %data, ptr addrspace(1) %data_addr @@ -1372,33 +1575,61 @@ define amdgpu_ps void @raw_buffer_load_v4i32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v4i32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: 
[[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v4i32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY 
[[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v4i32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160_align2 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <4 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v4i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0) %data = extractvalue { <4 x i32>, i32 } %res, 0 store <4 
x i32> %data, ptr addrspace(1) %data_addr @@ -1530,33 +1761,61 @@ define amdgpu_ps void @raw_buffer_load_v4f32_tfe(<4 x i32> inreg %rsrc, ptr addr ; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) ; GFX11-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: raw_buffer_load_v4f32_tfe - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] - ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2 - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3 - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4 - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3 - ; GFX12-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) - ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: raw_buffer_load_v4f32_tfe + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; 
GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3 + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4 + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3 + ; GFX1200-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: raw_buffer_load_v4f32_tfe + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160_align2 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2 + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3 + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4 + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE 
[[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3 + ; GFX1250-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1) + ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1) + ; GFX1250-NEXT: S_ENDPGM 0 %res = call { <4 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v4f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0) %data = extractvalue { <4 x float>, i32 } %res, 0 store <4 x float> %data, ptr addrspace(1) %data_addr diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.store.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.store.ll index 8183d85..c9771b5 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.store.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.store.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx810 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX8 %s -; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1200 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1250 %s ; Natural mapping define amdgpu_ps void @struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) { @@ -21,22 +22,39 @@ define amdgpu_ps void @struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex_ ; GFX8-NEXT: BUFFER_STORE_DWORD_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, 
$sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.struct.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -61,24 +79,43 @@ define amdgpu_ps void @struct_buffer_store_v2f32_sgpr_rsrc__vgpr_val__vgpr_vinde ; GFX8-NEXT: BUFFER_STORE_DWORDX2_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_store_v2f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, 
[[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_store_v2f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_store_v2f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.struct.buffer.store.v2f32(<2 x float> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 
%soffset, i32 0) ret void } @@ -104,25 +141,45 @@ define amdgpu_ps void @struct_buffer_store_v3f32_sgpr_rsrc__vgpr_val__vgpr_vinde ; GFX8-NEXT: BUFFER_STORE_DWORDX3_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_store_v3f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY4]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY6]], %subreg.sub3 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_store_v3f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY4]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY6]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_store_v3f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, 
$sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY4]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY6]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.struct.buffer.store.v3f32(<3 x float> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -149,26 +206,47 @@ define amdgpu_ps void @struct_buffer_store_v4f32_sgpr_rsrc__vgpr_val__vgpr_vinde ; GFX8-NEXT: BUFFER_STORE_DWORDX4_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY10]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_store_v4f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY10]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_store_v4f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: 
$sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY10]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_store_v4f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY10]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -233,64 +311,123 @@ define amdgpu_ps void @struct_buffer_store_v4f32_vgpr_rsrc__sgpr_val__sgpr_vinde ; GFX8-NEXT: bb.5: ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_store_v4f32_vgpr_rsrc__sgpr_val__sgpr_vindex__sgpr_voffset__vgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: successors: %bb.2(0x80000000) - ; 
GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7 - ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4 - ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_128 = COPY [[REG_SEQUENCE]] - ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] - ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY9]] - ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.2: - ; GFX12-NEXT: successors: %bb.3(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 - ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 - ; GFX12-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 - ; GFX12-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 - ; GFX12-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc - ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec - ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec - ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc - ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.3: - ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = 
REG_SEQUENCE [[COPY12]], %subreg.sub0, [[COPY13]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[COPY11]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) - ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc - ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.4: - ; GFX12-NEXT: successors: %bb.5(0x80000000) - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: bb.5: - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_store_v4f32_vgpr_rsrc__sgpr_val__sgpr_vindex__sgpr_voffset__vgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: successors: %bb.2(0x80000000) + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7 + ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_128 = COPY [[REG_SEQUENCE]] + ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY9]] + ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.2: + ; GFX1200-NEXT: successors: %bb.3(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1200-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec + ; GFX1200-NEXT: 
[[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec + ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec + ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.3: + ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY12]], %subreg.sub0, [[COPY13]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[COPY11]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) + ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.4: + ; GFX1200-NEXT: successors: %bb.5(0x80000000) + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: bb.5: + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_store_v4f32_vgpr_rsrc__sgpr_val__sgpr_vindex__sgpr_voffset__vgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: successors: %bb.2(0x80000000) + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7 + ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4 + ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_128_align2 = COPY [[REG_SEQUENCE]] + ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]] + ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY9]] + ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.2: + ; GFX1250-NEXT: successors: %bb.3(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec + ; GFX1250-NEXT: 
[[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]].sub0_sub1 + ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]].sub2_sub3 + ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1 + ; GFX1250-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3 + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc + ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec + ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec + ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc + ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.3: + ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY12]], %subreg.sub0, [[COPY13]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[COPY11]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8) + ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc + ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.4: + ; GFX1250-NEXT: successors: %bb.5(0x80000000) + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]] + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: bb.5: + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -313,22 +450,39 @@ define amdgpu_ps void @struct_buffer_store_i8_sgpr_rsrc__vgpr_val__vgpr_vindex__ ; GFX8-NEXT: BUFFER_STORE_BYTE_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s8), addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_store_i8_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: 
[[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_STORE_BYTE_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s8), addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_store_i8_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_STORE_BYTE_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s8), addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_store_i8_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_STORE_BYTE_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s8), addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %val.trunc = trunc i32 %val to i8 call void @llvm.amdgcn.struct.buffer.store.i8(i8 %val.trunc, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void @@ -352,22 +506,39 @@ define amdgpu_ps void @struct_buffer_store_i16_sgpr_rsrc__vgpr_val__vgpr_vindex_ ; GFX8-NEXT: 
BUFFER_STORE_SHORT_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s16), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_store_i16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_STORE_SHORT_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s16), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_store_i16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_STORE_SHORT_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s16), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_store_i16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: 
[[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_STORE_SHORT_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s16), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 %val.trunc = trunc i32 %val to i16 call void @llvm.amdgcn.struct.buffer.store.i16(i16 %val.trunc, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void @@ -391,22 +562,39 @@ define amdgpu_ps void @struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex_ ; GFX8-NEXT: BUFFER_STORE_DWORD_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = 
COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.struct.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 1) ret void } @@ -429,22 +617,39 @@ define amdgpu_ps void @struct_buffer_store_v2f16_sgpr_rsrc__vgpr_val__vgpr_vinde ; GFX8-NEXT: BUFFER_STORE_DWORD_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_store_v2f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_store_v2f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], 
%subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_store_v2f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.struct.buffer.store.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -475,24 +680,43 @@ define amdgpu_ps void @struct_buffer_store_v4f16_sgpr_rsrc__vgpr_val__vgpr_vinde ; GFX8-NEXT: BUFFER_STORE_DWORDX2_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) ; GFX8-NEXT: S_ENDPGM 0 ; - ; GFX12-LABEL: name: struct_buffer_store_v4f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset - ; GFX12: bb.1 (%ir-block.0): - ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 - ; GFX12-NEXT: {{ $}} - ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 - ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 - ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 - ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 - ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 - ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 - ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 - ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 - ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 - ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 - ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 - ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 - ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) - ; GFX12-NEXT: S_ENDPGM 0 + ; GFX1200-LABEL: name: struct_buffer_store_v4f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1200: bb.1 (%ir-block.0): + ; GFX1200-NEXT: liveins: $sgpr2, 
$sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1200-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) + ; GFX1200-NEXT: S_ENDPGM 0 + ; + ; GFX1250-LABEL: name: struct_buffer_store_v4f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset + ; GFX1250: bb.1 (%ir-block.0): + ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1 + ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2 + ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3 + ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4 + ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5 + ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3 + ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3 + ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6 + ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1 + ; GFX1250-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8) + ; GFX1250-NEXT: S_ENDPGM 0 call void @llvm.amdgcn.struct.buffer.store.v4f16(<4 x half> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0) ret void } diff --git a/llvm/test/CodeGen/AMDGPU/bf16-math.ll b/llvm/test/CodeGen/AMDGPU/bf16-math.ll index 682b3b4..3a82f84 100644 --- a/llvm/test/CodeGen/AMDGPU/bf16-math.ll +++ b/llvm/test/CodeGen/AMDGPU/bf16-math.ll @@ -370,6 +370,9 @@ define amdgpu_ps bfloat @test_clamp_bf16_folding(bfloat %src) { ; GCN: ; %bb.0: ; GCN-NEXT: v_exp_bf16_e64 v0, v0 clamp ; GCN-NEXT: ; return to shader part epilog + + + %exp = call bfloat @llvm.exp2.bf16(bfloat %src) %max = call bfloat @llvm.maxnum.bf16(bfloat %exp, bfloat 0.0) %clamp = call bfloat @llvm.minnum.bf16(bfloat %max, bfloat 1.0) @@ -381,6 +384,9 @@ define amdgpu_ps float @test_clamp_v2bf16_folding(<2 x bfloat> %src0, <2 x bfloa ; GCN: ; %bb.0: ; GCN-NEXT: v_pk_mul_bf16 v0, 
v0, v1 clamp ; GCN-NEXT: ; return to shader part epilog + + + %mul = fmul <2 x bfloat> %src0, %src1 %max = call <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat> %mul, <2 x bfloat> <bfloat 0.0, bfloat 0.0>) %clamp = call <2 x bfloat> @llvm.minnum.v2bf16(<2 x bfloat> %max, <2 x bfloat> <bfloat 1.0, bfloat 1.0>) @@ -391,11 +397,12 @@ define amdgpu_ps float @test_clamp_v2bf16_folding(<2 x bfloat> %src0, <2 x bfloa define amdgpu_ps void @v_test_mul_add_v2bf16_vvv(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c) { ; GCN-LABEL: v_test_mul_add_v2bf16_vvv: ; GCN: ; %bb.0: -; GCN-NEXT: v_pk_mul_bf16 v2, v2, v3 -; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GCN-NEXT: v_pk_add_bf16 v2, v2, v4 +; GCN-NEXT: v_pk_fma_bf16 v2, v2, v3, v4 ; GCN-NEXT: global_store_b32 v[0:1], v2, off ; GCN-NEXT: s_endpgm + + + %mul = fmul contract <2 x bfloat> %a, %b %add = fadd contract <2 x bfloat> %mul, %c store <2 x bfloat> %add, ptr addrspace(1) %out @@ -405,11 +412,12 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vvv(ptr addrspace(1) %out, <2 x bfl define amdgpu_ps void @v_test_mul_add_v2bf16_vss(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> inreg %b, <2 x bfloat> inreg %c) { ; GCN-LABEL: v_test_mul_add_v2bf16_vss: ; GCN: ; %bb.0: -; GCN-NEXT: v_pk_mul_bf16 v2, v2, s0 -; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GCN-NEXT: v_pk_add_bf16 v2, v2, s1 +; GCN-NEXT: v_pk_fma_bf16 v2, v2, s0, s1 ; GCN-NEXT: global_store_b32 v[0:1], v2, off ; GCN-NEXT: s_endpgm + + + %mul = fmul contract <2 x bfloat> %a, %b %add = fadd contract <2 x bfloat> %mul, %c store <2 x bfloat> %add, ptr addrspace(1) %out @@ -419,11 +427,14 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vss(ptr addrspace(1) %out, <2 x bfl define amdgpu_ps void @v_test_mul_add_v2bf16_sss(ptr addrspace(1) %out, <2 x bfloat> inreg %a, <2 x bfloat> inreg %b, <2 x bfloat> inreg %c) { ; GCN-LABEL: v_test_mul_add_v2bf16_sss: ; GCN: ; %bb.0: -; GCN-NEXT: v_pk_mul_bf16 v2, s0, s1 +; GCN-NEXT: v_mov_b32_e32 v2, s2 ; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GCN-NEXT: v_pk_add_bf16 v2, v2, s2 +; GCN-NEXT: v_pk_fma_bf16 v2, s0, s1, v2 ; GCN-NEXT: global_store_b32 v[0:1], v2, off ; GCN-NEXT: s_endpgm + + + %mul = fmul contract <2 x bfloat> %a, %b %add = fadd contract <2 x bfloat> %mul, %c store <2 x bfloat> %add, ptr addrspace(1) %out @@ -433,11 +444,12 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_sss(ptr addrspace(1) %out, <2 x bfl define amdgpu_ps void @v_test_mul_add_v2bf16_vsc(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> inreg %b) { ; GCN-LABEL: v_test_mul_add_v2bf16_vsc: ; GCN: ; %bb.0: -; GCN-NEXT: v_pk_mul_bf16 v2, v2, s0 -; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GCN-NEXT: v_pk_add_bf16 v2, v2, 0.5 op_sel_hi:[1,0] +; GCN-NEXT: v_pk_fma_bf16 v2, v2, s0, 0.5 op_sel_hi:[1,1,0] ; GCN-NEXT: global_store_b32 v[0:1], v2, off ; GCN-NEXT: s_endpgm + + + %mul = fmul contract <2 x bfloat> %a, %b %add = fadd contract <2 x bfloat> %mul, <bfloat 0.5, bfloat 0.5> store <2 x bfloat> %add, ptr addrspace(1) %out @@ -447,11 +459,14 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vsc(ptr addrspace(1) %out, <2 x bfl define amdgpu_ps void @v_test_mul_add_v2bf16_vll(ptr addrspace(1) %out, <2 x bfloat> %a) { ; GCN-LABEL: v_test_mul_add_v2bf16_vll: ; GCN: ; %bb.0: -; GCN-NEXT: v_pk_mul_bf16 v2, 0x42c83f80, v2 -; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GCN-NEXT: v_pk_add_bf16 v2, 0x43484000, v2 +; GCN-NEXT: s_mov_b32 s0, 0x43484000 +; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GCN-NEXT: v_pk_fma_bf16 v2, 0x42c83f80, v2, s0 ; GCN-NEXT: 
global_store_b32 v[0:1], v2, off ; GCN-NEXT: s_endpgm + + + %mul = fmul contract <2 x bfloat> %a, <bfloat 1.0, bfloat 100.0> %add = fadd contract <2 x bfloat> %mul, <bfloat 2.0, bfloat 200.0> store <2 x bfloat> %add, ptr addrspace(1) %out diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll index 52e697c..8f8ea13 100644 --- a/llvm/test/CodeGen/AMDGPU/bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/bf16.ll @@ -47043,18 +47043,10 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) { ; GFX8-LABEL: v_fmuladd_bf16: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX8-NEXT: v_mul_f32_e32 v0, v0, v1 -; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1 -; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v0 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 -; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc -; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v2 -; GFX8-NEXT: v_add_f32_e32 v0, v0, v1 +; GFX8-NEXT: v_fma_f32 v0, v0, v1, v2 ; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1 ; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0 ; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1 @@ -47067,20 +47059,13 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) { ; GFX900-LABEL: v_fmuladd_bf16: ; GFX900: ; %bb.0: ; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; GFX900-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; GFX900-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX900-NEXT: v_mul_f32_e32 v0, v0, v1 +; GFX900-NEXT: v_fma_f32 v0, v0, v1, v2 ; GFX900-NEXT: v_bfe_u32 v1, v0, 16, 1 ; GFX900-NEXT: s_movk_i32 s4, 0x7fff ; GFX900-NEXT: v_add3_u32 v1, v1, v0, s4 -; GFX900-NEXT: v_or_b32_e32 v3, 0x400000, v0 -; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 -; GFX900-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc -; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX900-NEXT: v_lshlrev_b32_e32 v1, 16, v2 -; GFX900-NEXT: v_add_f32_e32 v0, v0, v1 -; GFX900-NEXT: v_bfe_u32 v1, v0, 16, 1 -; GFX900-NEXT: v_add3_u32 v1, v1, v0, s4 ; GFX900-NEXT: v_or_b32_e32 v2, 0x400000, v0 ; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 ; GFX900-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc @@ -47090,35 +47075,25 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) { ; GFX950-LABEL: v_fmuladd_bf16: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX950-NEXT: v_mul_f32_e32 v0, v0, v1 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 -; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v2 -; GFX950-NEXT: v_add_f32_e32 v0, v0, v1 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 +; GFX950-NEXT: v_fmac_f32_e32 v2, v0, v1 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v2, s0 ; GFX950-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_fmuladd_bf16: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX10-NEXT: v_mul_f32_e32 v0, v0, v1 -; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v0 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff -; GFX10-NEXT: 
v_cndmask_b32_e32 v0, v1, v3, vcc_lo -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v2 -; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX10-NEXT: v_add_f32_e32 v0, v0, v1 -; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo +; GFX10-NEXT: v_fmac_f32_e32 v2, v0, v1 +; GFX10-NEXT: v_bfe_u32 v0, v2, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v2 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX10-NEXT: v_add3_u32 v0, v0, v2, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; @@ -47126,55 +47101,38 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) { ; GFX11TRUE16: ; %bb.0: ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, 0 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.h, v1.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.h, v2.l +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l ; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l ; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v3.l -; GFX11TRUE16-NEXT: v_mul_f32_e32 v0, v1, v3 -; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.h, v2.l -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1 -; GFX11TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0 -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11TRUE16-NEXT: v_add3_u32 v1, v1, v0, 0x7fff -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc_lo -; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v3, v1, v2 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v3, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v3 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v3, 0x7fff ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_add_f32_e32 v0, v0, v3 -; GFX11TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1 -; GFX11TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0 -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_add3_u32 v1, v1, v0, 0x7fff -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo ; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h ; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11FAKE16-LABEL: v_fmuladd_bf16: ; GFX11FAKE16: ; %bb.0: ; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0 ; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_mul_f32_e32 v0, v0, v1 -; GFX11FAKE16-NEXT: v_bfe_u32 v1, v0, 16, 1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v0 -; 
GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v2, v0, v1 +; GFX11FAKE16-NEXT: v_bfe_u32 v0, v2, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v2 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 ; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_add3_u32 v1, v1, v0, 0x7fff -; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v0, v1, v3 :: v_dual_lshlrev_b32 v1, 16, v2 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX11FAKE16-NEXT: v_add_f32_e32 v0, v0, v1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_bfe_u32 v1, v0, 16, 1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v0 -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11FAKE16-NEXT: v_add3_u32 v1, v1, v0, 0x7fff -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo +; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v2, 0x7fff +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX11FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] %op = call bfloat @llvm.fmuladd.bf16(bfloat %a, bfloat %b, bfloat %c) @@ -47235,39 +47193,22 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl ; GFX8-LABEL: v_fmuladd_v2bf16: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v1 -; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v0 -; GFX8-NEXT: v_mul_f32_e32 v3, v4, v3 -; GFX8-NEXT: v_bfe_u32 v4, v3, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v3 -; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4 -; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v3 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 -; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc -; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v2 -; GFX8-NEXT: v_add_f32_e32 v3, v3, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2 +; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v1 +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v0 +; GFX8-NEXT: v_fma_f32 v3, v5, v4, v3 ; GFX8-NEXT: v_bfe_u32 v4, v3, 16, 1 -; GFX8-NEXT: s_movk_i32 s4, 0x7fff ; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v3 +; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX8-NEXT: v_add_u32_e32 v4, vcc, s4, v4 -; GFX8-NEXT: v_mul_f32_e32 v0, v0, v1 +; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4 +; GFX8-NEXT: v_fma_f32 v0, v0, v1, v2 ; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v3 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 ; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1 ; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc ; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, s4, v1 -; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 -; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc -; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v2 -; GFX8-NEXT: v_add_f32_e32 v0, v0, v1 -; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0 ; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1 ; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 @@ -47279,36 +47220,22 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x 
bfloat> %a, <2 x bfloat> %b, <2 x bfl ; GFX900-LABEL: v_fmuladd_v2bf16: ; GFX900: ; %bb.0: ; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v1 -; GFX900-NEXT: v_lshlrev_b32_e32 v4, 16, v0 -; GFX900-NEXT: v_mul_f32_e32 v3, v4, v3 -; GFX900-NEXT: v_bfe_u32 v4, v3, 16, 1 -; GFX900-NEXT: s_movk_i32 s4, 0x7fff -; GFX900-NEXT: v_add3_u32 v4, v4, v3, s4 -; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v3 -; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 -; GFX900-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc -; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; GFX900-NEXT: v_lshlrev_b32_e32 v4, 16, v2 -; GFX900-NEXT: v_add_f32_e32 v3, v3, v4 +; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v2 +; GFX900-NEXT: v_lshlrev_b32_e32 v4, 16, v1 +; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v0 +; GFX900-NEXT: v_fma_f32 v3, v5, v4, v3 +; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 ; GFX900-NEXT: v_bfe_u32 v4, v3, 16, 1 -; GFX900-NEXT: v_mul_f32_e32 v0, v0, v1 +; GFX900-NEXT: s_movk_i32 s4, 0x7fff +; GFX900-NEXT: v_fma_f32 v0, v0, v1, v2 ; GFX900-NEXT: v_add3_u32 v4, v4, v3, s4 ; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v3 ; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 ; GFX900-NEXT: v_bfe_u32 v1, v0, 16, 1 ; GFX900-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc ; GFX900-NEXT: v_add3_u32 v1, v1, v0, s4 -; GFX900-NEXT: v_or_b32_e32 v4, 0x400000, v0 -; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 -; GFX900-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc -; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v2 -; GFX900-NEXT: v_add_f32_e32 v0, v0, v1 -; GFX900-NEXT: v_bfe_u32 v1, v0, 16, 1 -; GFX900-NEXT: v_add3_u32 v1, v1, v0, s4 ; GFX900-NEXT: v_or_b32_e32 v2, 0x400000, v0 ; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 ; GFX900-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc @@ -47319,150 +47246,94 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl ; GFX950-LABEL: v_fmuladd_v2bf16: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v1 -; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v0 +; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 +; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v1 +; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v0 +; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX950-NEXT: v_mul_f32_e32 v3, v4, v3 -; GFX950-NEXT: v_mul_f32_e32 v0, v0, v1 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v3, s0 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 -; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v2 -; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v2 -; GFX950-NEXT: v_add_f32_e32 v3, v3, v4 -; GFX950-NEXT: v_add_f32_e32 v0, v0, v1 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, v3 +; GFX950-NEXT: v_fmac_f32_e32 v3, v5, v4 +; GFX950-NEXT: v_fmac_f32_e32 v2, v0, v1 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v2, v3 ; GFX950-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_fmuladd_v2bf16: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v1 -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v0 -; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX10-NEXT: v_mul_f32_e32 v3, v4, v3 -; GFX10-NEXT: v_mul_f32_e32 v0, v0, v1 -; GFX10-NEXT: v_bfe_u32 v1, v3, 
16, 1 -; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v3 -; GFX10-NEXT: v_bfe_u32 v4, v0, 16, 1 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v0 -; GFX10-NEXT: v_add3_u32 v1, v1, v3, 0x7fff ; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; GFX10-NEXT: v_add3_u32 v4, v4, v0, 0x7fff +; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v0 ; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 ; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc_lo -; GFX10-NEXT: v_add_f32_e32 v1, v1, v3 ; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v1 -; GFX10-NEXT: v_add_f32_e32 v0, v0, v2 -; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX10-NEXT: v_bfe_u32 v3, v0, 16, 1 -; GFX10-NEXT: v_add3_u32 v2, v2, v1, 0x7fff -; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v0 -; GFX10-NEXT: v_add3_u32 v3, v3, v0, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX10-NEXT: v_cndmask_b32_e32 v0, v3, v5, vcc_lo -; GFX10-NEXT: v_perm_b32 v0, v0, v1, 0x7060302 +; GFX10-NEXT: v_fmac_f32_e32 v3, v5, v4 +; GFX10-NEXT: v_fmac_f32_e32 v2, v0, v1 +; GFX10-NEXT: v_bfe_u32 v0, v3, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v3 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX10-NEXT: v_bfe_u32 v1, v2, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v2 +; GFX10-NEXT: v_add3_u32 v0, v0, v3, 0x7fff +; GFX10-NEXT: v_add3_u32 v1, v1, v2, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo +; GFX10-NEXT: v_perm_b32 v0, v1, v0, 0x7060302 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX11TRUE16-LABEL: v_fmuladd_v2bf16: ; GFX11TRUE16: ; %bb.0: ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1 -; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v0 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v1 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v0 ; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_dual_mul_f32 v3, v4, v3 :: v_dual_and_b32 v0, 0xffff0000, v0 -; GFX11TRUE16-NEXT: v_mul_f32_e32 v0, v0, v1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_bfe_u32 v1, v3, 16, 1 -; GFX11TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v3 -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX11TRUE16-NEXT: v_add3_u32 v1, v1, v3, 0x7fff +; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 ; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; GFX11TRUE16-NEXT: v_bfe_u32 v4, v0, 16, 1 -; GFX11TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11TRUE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_add_f32 v1, v1, v3 -; GFX11TRUE16-NEXT: s_delay_alu 
instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX11TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1 -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_add_f32_e32 v0, v0, v2 -; GFX11TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1 -; GFX11TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff -; GFX11TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0 -; GFX11TRUE16-NEXT: v_add3_u32 v3, v3, v0, 0x7fff -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v3, v5, vcc_lo -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h -; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v1, v0 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v2, v0, v1 :: v_dual_fmac_f32 v3, v5, v4 +; GFX11TRUE16-NEXT: v_bfe_u32 v1, v2, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v3, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX11TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2 +; GFX11TRUE16-NEXT: v_add3_u32 v1, v1, v2, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v3, 0x7fff +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v1 ; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11FAKE16-LABEL: v_fmuladd_v2bf16: ; GFX11FAKE16: ; %bb.0: ; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1 -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v0 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v1 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v0 ; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_dual_mul_f32 v3, v4, v3 :: v_dual_and_b32 v0, 0xffff0000, v0 -; GFX11FAKE16-NEXT: v_mul_f32_e32 v0, v0, v1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_bfe_u32 v1, v3, 16, 1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v3 -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX11FAKE16-NEXT: v_add3_u32 v1, v1, v3, 0x7fff +; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 ; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; GFX11FAKE16-NEXT: v_bfe_u32 v4, v0, 16, 1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v0 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4) -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11FAKE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff -; GFX11FAKE16-NEXT: s_delay_alu 
instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_add_f32 v1, v1, v3 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX11FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1 -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11FAKE16-NEXT: v_add_f32_e32 v0, v0, v2 -; GFX11FAKE16-NEXT: v_bfe_u32 v2, v1, 16, 1 -; GFX11FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff -; GFX11FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v0 -; GFX11FAKE16-NEXT: v_add3_u32 v3, v3, v0, 0x7fff -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v3, v5, vcc_lo +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v2, v0, v1 :: v_dual_fmac_f32 v3, v5, v4 +; GFX11FAKE16-NEXT: v_bfe_u32 v1, v2, 16, 1 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_bfe_u32 v0, v3, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 +; GFX11FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v2 +; GFX11FAKE16-NEXT: v_add3_u32 v1, v1, v2, 0x7fff +; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v3, 0x7fff +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo ; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_perm_b32 v0, v0, v1, 0x7060302 +; GFX11FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x7060302 ; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] %op = call <2 x bfloat> @llvm.fmuladd.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c) ret <2 x bfloat> %op @@ -47542,57 +47413,33 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl ; GFX8-LABEL: v_fmuladd_v3bf16: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX8-NEXT: v_mul_f32_e32 v1, v1, v3 +; GFX8-NEXT: v_fma_f32 v1, v1, v3, v5 ; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1 ; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1 ; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3 -; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v1 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 -; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v6, vcc -; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v5 -; GFX8-NEXT: v_add_f32_e32 v1, v1, v3 -; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1 -; GFX8-NEXT: s_movk_i32 s4, 0x7fff -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3 ; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc -; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; GFX8-NEXT: 
v_lshlrev_b32_e32 v5, 16, v0 -; GFX8-NEXT: v_mul_f32_e32 v3, v5, v3 -; GFX8-NEXT: v_bfe_u32 v5, v3, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v3 -; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5 -; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v3 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 -; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc -; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v4 -; GFX8-NEXT: v_add_f32_e32 v3, v3, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v2 +; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v0 +; GFX8-NEXT: v_fma_f32 v3, v6, v5, v3 ; GFX8-NEXT: v_bfe_u32 v5, v3, 16, 1 +; GFX8-NEXT: s_movk_i32 s4, 0x7fff ; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v3 +; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 ; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 ; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5 -; GFX8-NEXT: v_mul_f32_e32 v0, v0, v2 +; GFX8-NEXT: v_fma_f32 v0, v0, v2, v4 ; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v3 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 ; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1 ; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc ; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0 -; GFX8-NEXT: v_add_u32_e32 v2, vcc, s4, v2 -; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v0 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 -; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc -; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v4 -; GFX8-NEXT: v_add_f32_e32 v0, v0, v2 -; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0 ; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2 ; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 @@ -47605,52 +47452,31 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl ; GFX900-LABEL: v_fmuladd_v3bf16: ; GFX900: ; %bb.0: ; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX900-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX900-NEXT: v_mul_f32_e32 v1, v1, v3 +; GFX900-NEXT: v_fma_f32 v1, v1, v3, v5 ; GFX900-NEXT: v_bfe_u32 v3, v1, 16, 1 ; GFX900-NEXT: s_movk_i32 s4, 0x7fff ; GFX900-NEXT: v_add3_u32 v3, v3, v1, s4 -; GFX900-NEXT: v_or_b32_e32 v6, 0x400000, v1 -; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 -; GFX900-NEXT: v_cndmask_b32_e32 v1, v3, v6, vcc -; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v5 -; GFX900-NEXT: v_add_f32_e32 v1, v1, v3 -; GFX900-NEXT: v_bfe_u32 v3, v1, 16, 1 -; GFX900-NEXT: v_add3_u32 v3, v3, v1, s4 ; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v1 ; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX900-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc -; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v0 -; GFX900-NEXT: v_mul_f32_e32 v3, v5, v3 -; GFX900-NEXT: v_bfe_u32 v5, v3, 16, 1 -; GFX900-NEXT: v_add3_u32 v5, v5, v3, s4 -; GFX900-NEXT: v_or_b32_e32 v6, 0x400000, v3 -; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 -; GFX900-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc -; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v4 -; GFX900-NEXT: v_add_f32_e32 v3, v3, v5 +; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v2 +; GFX900-NEXT: v_lshlrev_b32_e32 v6, 16, v0 +; GFX900-NEXT: v_fma_f32 v3, v6, v5, v3 +; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 ; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; GFX900-NEXT: 
v_and_b32_e32 v0, 0xffff0000, v0 ; GFX900-NEXT: v_bfe_u32 v5, v3, 16, 1 -; GFX900-NEXT: v_mul_f32_e32 v0, v0, v2 +; GFX900-NEXT: v_fma_f32 v0, v0, v2, v4 ; GFX900-NEXT: v_add3_u32 v5, v5, v3, s4 ; GFX900-NEXT: v_or_b32_e32 v6, 0x400000, v3 ; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 ; GFX900-NEXT: v_bfe_u32 v2, v0, 16, 1 ; GFX900-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc ; GFX900-NEXT: v_add3_u32 v2, v2, v0, s4 -; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v0 -; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 -; GFX900-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc -; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v4 -; GFX900-NEXT: v_add_f32_e32 v0, v0, v2 -; GFX900-NEXT: v_bfe_u32 v2, v0, 16, 1 -; GFX900-NEXT: v_add3_u32 v2, v2, v0, s4 ; GFX900-NEXT: v_or_b32_e32 v4, 0x400000, v0 ; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 ; GFX900-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc @@ -47662,211 +47488,132 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl ; GFX950-LABEL: v_fmuladd_v3bf16: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX950-NEXT: v_mul_f32_e32 v1, v1, v3 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 -; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v5 -; GFX950-NEXT: v_add_f32_e32 v1, v1, v3 -; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 -; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v0 +; GFX950-NEXT: v_fmac_f32_e32 v5, v1, v3 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v5, s0 +; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v4 +; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v2 +; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v0 +; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX950-NEXT: v_mul_f32_e32 v3, v5, v3 -; GFX950-NEXT: v_mul_f32_e32 v0, v0, v2 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v3, s0 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 -; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 -; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v4 -; GFX950-NEXT: v_add_f32_e32 v3, v3, v5 -; GFX950-NEXT: v_add_f32_e32 v0, v0, v2 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, v3 +; GFX950-NEXT: v_fmac_f32_e32 v3, v6, v5 +; GFX950-NEXT: v_fmac_f32_e32 v4, v0, v2 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v4, v3 ; GFX950-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_fmuladd_v3bf16: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v4 +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v2 +; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v0 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v2 -; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v0 -; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX10-NEXT: v_mul_f32_e32 v1, v1, v3 -; GFX10-NEXT: v_mul_f32_e32 v3, v7, v6 -; GFX10-NEXT: v_mul_f32_e32 v0, v0, v2 -; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1 -; GFX10-NEXT: v_bfe_u32 v7, v3, 16, 1 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX10-NEXT: v_bfe_u32 v8, v0, 16, 1 -; GFX10-NEXT: v_add3_u32 v2, v2, v1, 
0x7fff -; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v3 -; GFX10-NEXT: v_add3_u32 v7, v7, v3, 0x7fff -; GFX10-NEXT: v_or_b32_e32 v10, 0x400000, v0 -; GFX10-NEXT: v_add3_u32 v8, v8, v0, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v6, vcc_lo -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v5 -; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v4 ; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 -; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX10-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX10-NEXT: v_add_f32_e32 v1, v1, v3 ; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v10, vcc_lo -; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1 -; GFX10-NEXT: v_add_f32_e32 v2, v2, v5 ; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX10-NEXT: v_bfe_u32 v3, v2, 16, 1 -; GFX10-NEXT: v_add_f32_e32 v0, v0, v4 -; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v2 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX10-NEXT: v_bfe_u32 v4, v1, 16, 1 -; GFX10-NEXT: v_add3_u32 v3, v3, v2, 0x7fff -; GFX10-NEXT: v_bfe_u32 v5, v0, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0 -; GFX10-NEXT: v_add3_u32 v4, v4, v1, 0x7fff -; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v7, vcc_lo -; GFX10-NEXT: v_add3_u32 v5, v5, v0, 0x7fff -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX10-NEXT: v_cndmask_b32_e32 v0, v5, v8, vcc_lo -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX10-NEXT: v_perm_b32 v0, v0, v2, 0x7060302 -; GFX10-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo -; GFX10-NEXT: v_alignbit_b32 v1, s4, v1, 16 +; GFX10-NEXT: v_fmac_f32_e32 v6, v8, v7 +; GFX10-NEXT: v_fmac_f32_e32 v5, v1, v3 +; GFX10-NEXT: v_fmac_f32_e32 v4, v0, v2 +; GFX10-NEXT: v_bfe_u32 v1, v6, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v6 +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX10-NEXT: v_bfe_u32 v0, v5, 16, 1 +; GFX10-NEXT: v_bfe_u32 v2, v4, 16, 1 +; GFX10-NEXT: v_add3_u32 v1, v1, v6, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v4 +; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v5 +; GFX10-NEXT: v_add3_u32 v0, v0, v5, 0x7fff +; GFX10-NEXT: v_add3_u32 v2, v2, v4, 0x7fff +; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX10-NEXT: v_cndmask_b32_e32 v3, v0, v8, vcc_lo +; GFX10-NEXT: v_perm_b32 v0, v2, v1, 0x7060302 +; GFX10-NEXT: v_alignbit_b32 v1, s4, v3, 16 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX11TRUE16-LABEL: v_fmuladd_v3bf16: ; GFX11TRUE16: ; %bb.0: ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v2 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v8, 16, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2 -; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v0 -; GFX11TRUE16-NEXT: v_dual_mul_f32 v1, v1, v3 :: v_dual_and_b32 v0, 0xffff0000, v0 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v4 +; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_mul_f32_e32 v0, v0, v2 -; 
GFX11TRUE16-NEXT: v_mul_f32_e32 v6, v7, v6 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_bfe_u32 v9, v1, 16, 1 -; GFX11TRUE16-NEXT: v_bfe_u32 v7, v0, 16, 1 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v4, v0, v2 :: v_dual_fmac_f32 v5, v1, v3 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v6, v8, v7 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_bfe_u32 v1, v4, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v3, v5, 16, 1 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1 -; GFX11TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v6 +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v6, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v6 ; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 -; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v0 -; GFX11TRUE16-NEXT: v_add3_u32 v7, v7, v0, 0x7fff -; GFX11TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff -; GFX11TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4 -; GFX11TRUE16-NEXT: v_add3_u32 v8, v9, v1, 0x7fff -; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 -; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v7, v10, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_add_f32_e32 v2, v2, v3 -; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v1, v8, v6 :: v_dual_and_b32 v0, 0xffff0000, v0 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1 -; GFX11TRUE16-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_and_b32 v1, 0xffff0000, v1 -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff -; GFX11TRUE16-NEXT: v_bfe_u32 v4, v0, 16, 1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_add_f32_e32 v1, v1, v5 -; GFX11TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2 -; GFX11TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0 -; GFX11TRUE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_bfe_u32 v6, v1, 16, 1 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_add3_u32 v5, v6, v1, 0x7fff -; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v4, v7, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 +; GFX11TRUE16-NEXT: v_add3_u32 v1, v1, v4, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4 +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v6, 0x7fff +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX11TRUE16-NEXT: v_add3_u32 v2, v3, v5, 0x7fff +; GFX11TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5 +; 
GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 ; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v2, v0 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v5, v3, vcc_lo -; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h +; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v1 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo +; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.h ; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11FAKE16-LABEL: v_fmuladd_v3bf16: ; GFX11FAKE16: ; %bb.0: ; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2 -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v0 -; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_dual_mul_f32 v0, v0, v2 :: v_dual_lshlrev_b32 v3, 16, v3 -; GFX11FAKE16-NEXT: v_bfe_u32 v8, v0, 16, 1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v0 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_add3_u32 v8, v8, v0, 0x7fff -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX11FAKE16-NEXT: v_mul_f32_e32 v1, v1, v3 -; GFX11FAKE16-NEXT: v_mul_f32_e32 v3, v7, v6 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_bfe_u32 v2, v1, 16, 1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1 -; GFX11FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1 -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3 -; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v2, v6, vcc_lo -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v5 -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v4 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v4 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v2 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v0 ; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) ; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v8, v10, vcc_lo -; GFX11FAKE16-NEXT: v_add_f32_e32 v2, v2, v5 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) ; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v2 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_add_f32 v1, v1, v3 -; GFX11FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1 -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX11FAKE16-NEXT: v_bfe_u32 v5, v0, 16, 1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1 -; GFX11FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff -; 
GFX11FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0 -; GFX11FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1 -; GFX11FAKE16-NEXT: v_add3_u32 v5, v5, v0, 0x7fff -; GFX11FAKE16-NEXT: v_add3_u32 v4, v4, v1, 0x7fff -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v7, vcc_lo -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v5, v8, vcc_lo -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11FAKE16-NEXT: v_perm_b32 v0, v0, v2, 0x7060302 -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_alignbit_b32 v1, s0, v1, 16 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX11FAKE16-NEXT: v_dual_fmac_f32 v6, v8, v7 :: v_dual_lshlrev_b32 v5, 16, v5 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v4, v0, v2 +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v4 +; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v4, 0x7fff +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v5, v1, v3 +; GFX11FAKE16-NEXT: v_bfe_u32 v1, v6, 16, 1 +; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v6 +; GFX11FAKE16-NEXT: v_bfe_u32 v0, v5, 16, 1 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_add3_u32 v1, v1, v6, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5 +; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v5, 0x7fff +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v3, v0, v8, vcc_lo +; GFX11FAKE16-NEXT: v_perm_b32 v0, v2, v1, 0x7060302 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_alignbit_b32 v1, s0, v3, 16 ; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] %op = call <3 x bfloat> @llvm.fmuladd.v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfloat> %c) ret <3 x bfloat> %op @@ -47966,75 +47713,43 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl ; GFX8-LABEL: v_fmuladd_v4bf16: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v3 -; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v1 -; GFX8-NEXT: v_mul_f32_e32 v6, v7, v6 -; GFX8-NEXT: v_bfe_u32 v7, v6, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v6 -; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x7fff, v7 -; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v6 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 -; GFX8-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc -; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 -; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v5 -; GFX8-NEXT: v_add_f32_e32 v6, v6, v7 +; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v3 +; GFX8-NEXT: v_lshlrev_b32_e32 v8, 16, v1 +; GFX8-NEXT: v_fma_f32 v6, v8, v7, v6 ; GFX8-NEXT: v_bfe_u32 v7, v6, 16, 1 -; GFX8-NEXT: s_movk_i32 s4, 0x7fff ; GFX8-NEXT: 
v_add_u32_e32 v7, vcc, v7, v6 +; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 ; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX8-NEXT: v_add_u32_e32 v7, vcc, s4, v7 -; GFX8-NEXT: v_mul_f32_e32 v1, v1, v3 +; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x7fff, v7 +; GFX8-NEXT: v_fma_f32 v1, v1, v3, v5 ; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v6 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 ; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1 +; GFX8-NEXT: s_movk_i32 s4, 0x7fff ; GFX8-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc ; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1 ; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3 -; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v1 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 -; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v7, vcc -; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v5 -; GFX8-NEXT: v_add_f32_e32 v1, v1, v3 -; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1 -; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3 ; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc -; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v0 -; GFX8-NEXT: v_mul_f32_e32 v3, v5, v3 -; GFX8-NEXT: v_bfe_u32 v5, v3, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v3 -; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5 -; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 -; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc -; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v4 -; GFX8-NEXT: v_add_f32_e32 v3, v3, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v2 +; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v0 +; GFX8-NEXT: v_fma_f32 v3, v7, v5, v3 ; GFX8-NEXT: v_bfe_u32 v5, v3, 16, 1 ; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v3 +; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 ; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 ; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5 -; GFX8-NEXT: v_mul_f32_e32 v0, v0, v2 +; GFX8-NEXT: v_fma_f32 v0, v0, v2, v4 ; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 ; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1 ; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc ; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0 -; GFX8-NEXT: v_add_u32_e32 v2, vcc, s4, v2 -; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v0 -; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 -; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc -; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v4 -; GFX8-NEXT: v_add_f32_e32 v0, v0, v2 -; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1 -; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0 ; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2 ; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0 ; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 @@ -48048,68 +47763,40 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl ; GFX900-LABEL: v_fmuladd_v4bf16: ; GFX900: ; %bb.0: ; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX900-NEXT: v_lshlrev_b32_e32 v6, 16, v3 -; GFX900-NEXT: v_lshlrev_b32_e32 v7, 16, v1 -; GFX900-NEXT: v_mul_f32_e32 v6, v7, v6 -; GFX900-NEXT: v_bfe_u32 v7, v6, 16, 1 -; GFX900-NEXT: s_movk_i32 s4, 0x7fff -; GFX900-NEXT: v_add3_u32 v7, v7, v6, s4 -; GFX900-NEXT: v_or_b32_e32 v8, 0x400000, v6 -; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 -; GFX900-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc -; GFX900-NEXT: v_and_b32_e32 v6, 
0xffff0000, v6 -; GFX900-NEXT: v_lshlrev_b32_e32 v7, 16, v5 -; GFX900-NEXT: v_add_f32_e32 v6, v6, v7 +; GFX900-NEXT: v_lshlrev_b32_e32 v6, 16, v5 +; GFX900-NEXT: v_lshlrev_b32_e32 v7, 16, v3 +; GFX900-NEXT: v_lshlrev_b32_e32 v8, 16, v1 +; GFX900-NEXT: v_fma_f32 v6, v8, v7, v6 +; GFX900-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 ; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; GFX900-NEXT: v_bfe_u32 v7, v6, 16, 1 -; GFX900-NEXT: v_mul_f32_e32 v1, v1, v3 +; GFX900-NEXT: s_movk_i32 s4, 0x7fff +; GFX900-NEXT: v_fma_f32 v1, v1, v3, v5 ; GFX900-NEXT: v_add3_u32 v7, v7, v6, s4 ; GFX900-NEXT: v_or_b32_e32 v8, 0x400000, v6 ; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6 ; GFX900-NEXT: v_bfe_u32 v3, v1, 16, 1 ; GFX900-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc ; GFX900-NEXT: v_add3_u32 v3, v3, v1, s4 -; GFX900-NEXT: v_or_b32_e32 v7, 0x400000, v1 -; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 -; GFX900-NEXT: v_cndmask_b32_e32 v1, v3, v7, vcc -; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v5 -; GFX900-NEXT: v_add_f32_e32 v1, v1, v3 -; GFX900-NEXT: v_bfe_u32 v3, v1, 16, 1 -; GFX900-NEXT: v_add3_u32 v3, v3, v1, s4 ; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v1 ; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1 ; GFX900-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc -; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v2 -; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v0 -; GFX900-NEXT: v_mul_f32_e32 v3, v5, v3 -; GFX900-NEXT: v_bfe_u32 v5, v3, 16, 1 -; GFX900-NEXT: v_add3_u32 v5, v5, v3, s4 -; GFX900-NEXT: v_or_b32_e32 v7, 0x400000, v3 -; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 -; GFX900-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc -; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v4 -; GFX900-NEXT: v_add_f32_e32 v3, v3, v5 +; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v4 +; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v2 +; GFX900-NEXT: v_lshlrev_b32_e32 v7, 16, v0 +; GFX900-NEXT: v_fma_f32 v3, v7, v5, v3 +; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 ; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 ; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 ; GFX900-NEXT: v_bfe_u32 v5, v3, 16, 1 -; GFX900-NEXT: v_mul_f32_e32 v0, v0, v2 +; GFX900-NEXT: v_fma_f32 v0, v0, v2, v4 ; GFX900-NEXT: v_add3_u32 v5, v5, v3, s4 ; GFX900-NEXT: v_or_b32_e32 v7, 0x400000, v3 ; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3 ; GFX900-NEXT: v_bfe_u32 v2, v0, 16, 1 ; GFX900-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc ; GFX900-NEXT: v_add3_u32 v2, v2, v0, s4 -; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v0 -; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 -; GFX900-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc -; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v4 -; GFX900-NEXT: v_add_f32_e32 v0, v0, v2 -; GFX900-NEXT: v_bfe_u32 v2, v0, 16, 1 -; GFX900-NEXT: v_add3_u32 v2, v2, v0, s4 ; GFX900-NEXT: v_or_b32_e32 v4, 0x400000, v0 ; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0 ; GFX900-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc @@ -48121,264 +47808,162 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl ; GFX950-LABEL: v_fmuladd_v4bf16: ; GFX950: ; %bb.0: ; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v3 -; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v1 +; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v5 +; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v3 +; GFX950-NEXT: v_and_b32_e32 v8, 0xffff0000, v1 +; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; GFX950-NEXT: 
v_lshlrev_b32_e32 v3, 16, v3 ; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX950-NEXT: v_mul_f32_e32 v1, v1, v3 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0 -; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v5 -; GFX950-NEXT: v_mul_f32_e32 v6, v7, v6 -; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v5 -; GFX950-NEXT: v_add_f32_e32 v1, v1, v3 +; GFX950-NEXT: v_fmac_f32_e32 v6, v8, v7 +; GFX950-NEXT: v_fmac_f32_e32 v5, v1, v3 +; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 ; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v2 -; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v0 +; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v0 +; GFX950-NEXT: v_fmac_f32_e32 v1, v7, v3 +; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v4 ; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX950-NEXT: v_mul_f32_e32 v3, v5, v3 -; GFX950-NEXT: v_mul_f32_e32 v0, v0, v2 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v6, v6, s0 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v3, s0 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0 -; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v4 -; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v4 -; GFX950-NEXT: v_add_f32_e32 v6, v6, v7 -; GFX950-NEXT: v_add_f32_e32 v3, v3, v5 -; GFX950-NEXT: v_add_f32_e32 v0, v0, v2 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, v3 -; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v1, v6 +; GFX950-NEXT: v_fmac_f32_e32 v3, v0, v2 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v3, v1 +; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v5, v6 ; GFX950-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: v_fmuladd_v4bf16: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v3 -; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v5 +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v3 +; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v1 +; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 ; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 ; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; GFX10-NEXT: v_lshlrev_b32_e32 v9, 16, v0 -; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX10-NEXT: v_mul_f32_e32 v6, v7, v6 -; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v2 +; GFX10-NEXT: v_fmac_f32_e32 v6, v8, v7 +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v4 +; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v2 +; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 ; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX10-NEXT: v_mul_f32_e32 v1, v1, v3 -; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v5 +; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 ; GFX10-NEXT: v_bfe_u32 v10, v6, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v6 -; GFX10-NEXT: v_mul_f32_e32 v7, v9, v7 -; GFX10-NEXT: v_mul_f32_e32 v0, v0, v2 -; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1 -; GFX10-NEXT: v_add3_u32 v10, v10, v6, 0x7fff +; GFX10-NEXT: v_fmac_f32_e32 v5, v1, v3 +; GFX10-NEXT: v_fmac_f32_e32 v7, v9, v8 +; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v6 +; GFX10-NEXT: v_fmac_f32_e32 v4, v0, v2 +; GFX10-NEXT: v_add3_u32 v0, v10, v6, 0x7fff +; GFX10-NEXT: v_bfe_u32 v2, v5, 16, 1 +; GFX10-NEXT: v_bfe_u32 v3, v7, 16, 1 ; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 -; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1 -; GFX10-NEXT: v_bfe_u32 v9, v7, 16, 1 -; GFX10-NEXT: v_add3_u32 v2, v2, v1, 0x7fff -; GFX10-NEXT: v_bfe_u32 v11, v0, 16, 1 -; GFX10-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc_lo -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX10-NEXT: v_or_b32_e32 v10, 
0x400000, v7 -; GFX10-NEXT: v_add3_u32 v9, v9, v7, 0x7fff -; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v0 -; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v6, vcc_lo +; GFX10-NEXT: v_bfe_u32 v8, v4, 16, 1 +; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5 +; GFX10-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc_lo +; GFX10-NEXT: v_add3_u32 v0, v2, v5, 0x7fff +; GFX10-NEXT: v_add3_u32 v2, v3, v7, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v7 ; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 -; GFX10-NEXT: v_add3_u32 v11, v11, v0, 0x7fff -; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 -; GFX10-NEXT: v_add_f32_e32 v3, v3, v8 -; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX10-NEXT: v_cndmask_b32_e32 v2, v9, v10, vcc_lo -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v4 -; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 -; GFX10-NEXT: v_bfe_u32 v7, v3, 16, 1 -; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 -; GFX10-NEXT: v_cndmask_b32_e32 v0, v11, v12, vcc_lo -; GFX10-NEXT: v_add_f32_e32 v1, v1, v5 -; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v3 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX10-NEXT: v_add_f32_e32 v2, v2, v6 -; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX10-NEXT: v_bfe_u32 v6, v1, 16, 1 -; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v1 -; GFX10-NEXT: v_add_f32_e32 v0, v0, v4 -; GFX10-NEXT: v_add3_u32 v4, v7, v3, 0x7fff -; GFX10-NEXT: v_bfe_u32 v7, v2, 16, 1 -; GFX10-NEXT: v_bfe_u32 v8, v0, 16, 1 -; GFX10-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc_lo -; GFX10-NEXT: v_add3_u32 v4, v6, v1, 0x7fff -; GFX10-NEXT: v_add3_u32 v5, v7, v2, 0x7fff -; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v2 -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX10-NEXT: v_add3_u32 v7, v8, v0, 0x7fff -; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0 -; GFX10-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX10-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo -; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX10-NEXT: v_perm_b32 v0, v0, v2, 0x7060302 -; GFX10-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo -; GFX10-NEXT: v_perm_b32 v1, v1, v3, 0x7060302 +; GFX10-NEXT: v_add3_u32 v6, v8, v4, 0x7fff +; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4 +; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX10-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo +; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX10-NEXT: v_cndmask_b32_e32 v4, v0, v9, vcc_lo +; GFX10-NEXT: v_perm_b32 v0, v3, v2, 0x7060302 +; GFX10-NEXT: v_perm_b32 v1, v4, v1, 0x7060302 ; GFX10-NEXT: s_setpc_b64 s[30:31] ; ; GFX11TRUE16-LABEL: v_fmuladd_v4bf16: ; GFX11TRUE16: ; %bb.0: ; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3 -; GFX11TRUE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v0 -; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX11TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_dual_mul_f32 v6, v7, v6 :: v_dual_lshlrev_b32 v3, 16, v3 -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v3 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v10, 16, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 +; GFX11TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v1 ; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX11TRUE16-NEXT: v_and_b32_e32 v8, 
0xffff0000, v5 -; GFX11TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1 +; GFX11TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v5 ; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_mul_f32_e32 v1, v1, v3 -; GFX11TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v2 -; GFX11TRUE16-NEXT: v_dual_mul_f32 v3, v9, v7 :: v_dual_lshlrev_b32 v2, 16, v2 -; GFX11TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6 -; GFX11TRUE16-NEXT: v_add3_u32 v9, v10, v6, 0x7fff -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_mul_f32_e32 v0, v0, v2 -; GFX11TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1 -; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v1 -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v9, v7, vcc_lo -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_bfe_u32 v9, v0, 16, 1 -; GFX11TRUE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v0 -; GFX11TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1 -; GFX11TRUE16-NEXT: v_add3_u32 v9, v9, v0, 0x7fff -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v10, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff -; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v0, v9, v11 :: v_dual_and_b32 v1, 0xffff0000, v1 -; GFX11TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6 -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_dual_add_f32 v1, v1, v5 :: v_dual_and_b32 v0, 0xffff0000, v0 -; GFX11TRUE16-NEXT: v_add_f32_e32 v2, v6, v8 -; GFX11TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11TRUE16-NEXT: v_bfe_u32 v5, v1, 16, 1 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v5, v1, v3 +; GFX11TRUE16-NEXT: v_dual_fmac_f32 v6, v8, v7 :: v_dual_lshlrev_b32 v7, 16, v4 +; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v8, 16, v2 +; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v4 +; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX11TRUE16-NEXT: v_bfe_u32 v3, v5, 16, 1 +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v7, v10, v8 +; GFX11TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5 +; GFX11TRUE16-NEXT: v_fmac_f32_e32 v1, v0, v2 +; GFX11TRUE16-NEXT: v_add3_u32 v3, v3, v5, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v9, v6, 16, 1 +; GFX11TRUE16-NEXT: v_bfe_u32 v0, v7, 16, 1 +; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v7 +; GFX11TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v6 +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 +; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v7, 0x7fff +; GFX11TRUE16-NEXT: v_add3_u32 v4, v9, v6, 0x7fff +; GFX11TRUE16-NEXT: v_bfe_u32 v9, v1, 16, 1 ; GFX11TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1 -; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v3, v7, v6 :: v_dual_lshlrev_b32 v6, 16, v4 -; GFX11TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1 -; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 -; GFX11TRUE16-NEXT: v_add3_u32 v5, v5, v1, 0x7fff -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | 
instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_dual_add_f32 v0, v0, v6 :: v_dual_and_b32 v3, 0xffff0000, v3 -; GFX11TRUE16-NEXT: v_add3_u32 v6, v7, v2, 0x7fff -; GFX11TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v2 +; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v10, vcc_lo +; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 +; GFX11TRUE16-NEXT: v_add3_u32 v5, v9, v1, 0x7fff +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h +; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc_lo ; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_add_f32_e32 v3, v3, v4 -; GFX11TRUE16-NEXT: v_bfe_u32 v4, v0, 16, 1 -; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v0 ; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v5, v8, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11TRUE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v4, v10, vcc_lo -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4) -; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v7, vcc_lo -; GFX11TRUE16-NEXT: v_bfe_u32 v9, v3, 16, 1 -; GFX11TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3 -; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX11TRUE16-NEXT: v_bfi_b32 v1, 0xffff, v1, v2 -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_add3_u32 v5, v9, v3, 0x7fff -; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v8, vcc_lo -; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v3 +; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v1 +; GFX11TRUE16-NEXT: v_bfi_b32 v1, 0xffff, v3, v2 ; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31] ; ; GFX11FAKE16-LABEL: v_fmuladd_v4bf16: ; GFX11FAKE16: ; %bb.0: ; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v1 +; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 ; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v9, 16, v0 ; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v1 -; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1 -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v5 -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v3 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v3 ; GFX11FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_dual_mul_f32 v6, v7, v6 :: v_dual_and_b32 v5, 0xffff0000, v5 -; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v2 -; GFX11FAKE16-NEXT: v_dual_mul_f32 v1, v1, v3 :: v_dual_and_b32 v2, 0xffff0000, v2 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v5 +; GFX11FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v5 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v5, v1, v3 +; 
GFX11FAKE16-NEXT: v_dual_fmac_f32 v6, v8, v7 :: v_dual_lshlrev_b32 v7, 16, v4 +; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1) ; GFX11FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v6 +; GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v6 ; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6 -; GFX11FAKE16-NEXT: v_mul_f32_e32 v7, v9, v7 -; GFX11FAKE16-NEXT: v_add3_u32 v10, v10, v6, 0x7fff -; GFX11FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_bfe_u32 v9, v7, 16, 1 -; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v3, v10, v3 :: v_dual_mul_f32 v0, v0, v2 -; GFX11FAKE16-NEXT: v_bfe_u32 v2, v1, 16, 1 -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v7 -; GFX11FAKE16-NEXT: v_add3_u32 v9, v9, v7, 0x7fff -; GFX11FAKE16-NEXT: v_bfe_u32 v11, v0, 16, 1 -; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff -; GFX11FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v0 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_add3_u32 v11, v11, v0, 0x7fff -; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v1, v2, v6 :: v_dual_lshlrev_b32 v6, 16, v4 +; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v2 +; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2 +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v4, v0, v2 +; GFX11FAKE16-NEXT: v_add3_u32 v0, v10, v6, 0x7fff +; GFX11FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc_lo +; GFX11FAKE16-NEXT: v_fmac_f32_e32 v7, v9, v8 +; GFX11FAKE16-NEXT: v_bfe_u32 v8, v4, 16, 1 +; GFX11FAKE16-NEXT: v_add3_u32 v0, v2, v5, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4) +; GFX11FAKE16-NEXT: v_bfe_u32 v3, v7, 16, 1 ; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7 -; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) -; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v2, v9, v10 :: v_dual_and_b32 v1, 0xffff0000, v1 -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11FAKE16-NEXT: v_dual_add_f32 v1, v1, v5 :: v_dual_and_b32 v2, 0xffff0000, v2 -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v11, v12, vcc_lo -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1 -; GFX11FAKE16-NEXT: v_add_f32_e32 v2, v2, v6 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0 -; GFX11FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3 -; GFX11FAKE16-NEXT: v_bfe_u32 v6, v1, 16, 1 -; GFX11FAKE16-NEXT: v_add_f32_e32 v0, v0, v4 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11FAKE16-NEXT: v_add_f32_e32 v3, v3, v8 -; GFX11FAKE16-NEXT: v_bfe_u32 v8, v0, 16, 1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1 -; GFX11FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v3 -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3 -; GFX11FAKE16-NEXT: v_add3_u32 v4, v7, v3, 0x7fff -; 
GFX11FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc_lo -; GFX11FAKE16-NEXT: v_add3_u32 v4, v6, v1, 0x7fff -; GFX11FAKE16-NEXT: v_add3_u32 v5, v7, v2, 0x7fff -; GFX11FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2 -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2 -; GFX11FAKE16-NEXT: v_add3_u32 v7, v8, v0, 0x7fff -; GFX11FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0 -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo -; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1 -; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1) -; GFX11FAKE16-NEXT: v_perm_b32 v0, v0, v2, 0x7060302 -; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo -; GFX11FAKE16-NEXT: v_perm_b32 v1, v1, v3, 0x7060302 +; GFX11FAKE16-NEXT: v_add3_u32 v6, v8, v4, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4 +; GFX11FAKE16-NEXT: v_add3_u32 v2, v3, v7, 0x7fff +; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v7 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_3) +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo +; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5 +; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v4, v0, v9, vcc_lo +; GFX11FAKE16-NEXT: v_perm_b32 v0, v3, v2, 0x7060302 +; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x7060302 ; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31] %op = call <4 x bfloat> @llvm.fmuladd.v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat> %c) ret <4 x bfloat> %op diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll index 7d36c9f..004d3c0 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll +++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll @@ -284,6 +284,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: s_clause 0x1 ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 @@ -329,6 +330,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof ; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo +; GFX1250-GISEL-NEXT: s_clause 0x1 ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[4:5], off scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 @@ -382,6 +384,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo +; GFX1250-SDAG-NEXT: s_clause 0x1 ; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off ; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off 
scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 @@ -430,6 +433,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i ; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo +; GFX1250-GISEL-NEXT: s_clause 0x1 ; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off ; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[4:5], off scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 diff --git a/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll b/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll index 3a898a9..f0db321 100644 --- a/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll +++ b/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll @@ -244,8 +244,9 @@ define i32 @test_v64i32_load_store(ptr addrspace(1) %ptr, i32 %idx, ptr addrspac ; GCN-GISEL-NEXT: global_load_b128 v[60:63], v[0:1], off offset:16 ; GCN-GISEL-NEXT: global_load_b128 v[0:3], v[0:1], off offset:240 ; GCN-GISEL-NEXT: s_wait_loadcnt 0x0 -; GCN-GISEL-NEXT: scratch_store_b128 off, v[0:3], s32 offset:64 scope:SCOPE_SE ; 16-byte Folded Spill -; GCN-GISEL-NEXT: scratch_load_b128 v[0:3], off, s32 offset:80 th:TH_LOAD_LU ; 16-byte Folded Reload +; GCN-GISEL-NEXT: s_clause 0x1 +; GCN-GISEL-NEXT: scratch_store_b128 off, v[0:3], s32 offset:64 scope:SCOPE_SE +; GCN-GISEL-NEXT: scratch_load_b128 v[0:3], off, s32 offset:80 th:TH_LOAD_LU ; GCN-GISEL-NEXT: s_wait_loadcnt 0x0 ; GCN-GISEL-NEXT: s_clause 0xe ; GCN-GISEL-NEXT: global_store_b128 v[46:47], v[0:3], off offset:32 diff --git a/llvm/test/CodeGen/AMDGPU/hard-clauses-gfx1250.mir b/llvm/test/CodeGen/AMDGPU/hard-clauses-gfx1250.mir index 8007597..492753b 100644 --- a/llvm/test/CodeGen/AMDGPU/hard-clauses-gfx1250.mir +++ b/llvm/test/CodeGen/AMDGPU/hard-clauses-gfx1250.mir @@ -1,6 +1,507 @@ # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -run-pass si-insert-hard-clauses %s -o - | FileCheck %s -check-prefixes=GFX12 -# RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -run-pass si-insert-hard-clauses %s -o - | FileCheck %s -check-prefixes=GFX12 +# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -run-pass si-insert-hard-clauses %s -o - | FileCheck %s -check-prefixes=GFX12,GFX1200 +# RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -run-pass si-insert-hard-clauses %s -o - | FileCheck %s -check-prefixes=GFX12,GFX1250 + +--- +name: long_clause +tracksRegLiveness: true +body: | + bb.0: + liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0 + ; GFX1200-LABEL: name: long_clause + ; GFX1200: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: BUNDLE implicit-def $vgpr1, implicit-def $vgpr1_lo16, implicit-def $vgpr1_hi16, implicit-def $vgpr2, implicit-def $vgpr2_lo16, implicit-def $vgpr2_hi16, implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit-def $vgpr4, implicit-def $vgpr4_lo16, implicit-def $vgpr4_hi16, implicit-def $vgpr5, implicit-def $vgpr5_lo16, implicit-def $vgpr5_hi16, implicit-def $vgpr6, implicit-def $vgpr6_lo16, implicit-def $vgpr6_hi16, implicit-def $vgpr7, implicit-def $vgpr7_lo16, implicit-def $vgpr7_hi16, implicit-def $vgpr8, implicit-def $vgpr8_lo16, implicit-def $vgpr8_hi16, implicit-def $vgpr9, implicit-def $vgpr9_lo16, implicit-def $vgpr9_hi16, implicit-def $vgpr10, implicit-def $vgpr10_lo16, implicit-def $vgpr10_hi16, implicit-def $vgpr11, implicit-def $vgpr11_lo16, implicit-def $vgpr11_hi16, implicit-def $vgpr12, implicit-def 
$vgpr12_lo16, implicit-def $vgpr12_hi16, implicit-def $vgpr13, implicit-def $vgpr13_lo16, implicit-def $vgpr13_hi16, implicit-def $vgpr14, implicit-def $vgpr14_lo16, implicit-def $vgpr14_hi16, implicit-def $vgpr15, implicit-def $vgpr15_lo16, implicit-def $vgpr15_hi16, implicit-def $vgpr16, implicit-def $vgpr16_lo16, implicit-def $vgpr16_hi16, implicit-def $vgpr17, implicit-def $vgpr17_lo16, implicit-def $vgpr17_hi16, implicit-def $vgpr18, implicit-def $vgpr18_lo16, implicit-def $vgpr18_hi16, implicit-def $vgpr19, implicit-def $vgpr19_lo16, implicit-def $vgpr19_hi16, implicit-def $vgpr20, implicit-def $vgpr20_lo16, implicit-def $vgpr20_hi16, implicit-def $vgpr21, implicit-def $vgpr21_lo16, implicit-def $vgpr21_hi16, implicit-def $vgpr22, implicit-def $vgpr22_lo16, implicit-def $vgpr22_hi16, implicit-def $vgpr23, implicit-def $vgpr23_lo16, implicit-def $vgpr23_hi16, implicit-def $vgpr24, implicit-def $vgpr24_lo16, implicit-def $vgpr24_hi16, implicit-def $vgpr25, implicit-def $vgpr25_lo16, implicit-def $vgpr25_hi16, implicit-def $vgpr26, implicit-def $vgpr26_lo16, implicit-def $vgpr26_hi16, implicit-def $vgpr27, implicit-def $vgpr27_lo16, implicit-def $vgpr27_hi16, implicit-def $vgpr28, implicit-def $vgpr28_lo16, implicit-def $vgpr28_hi16, implicit-def $vgpr29, implicit-def $vgpr29_lo16, implicit-def $vgpr29_hi16, implicit-def $vgpr30, implicit-def $vgpr30_lo16, implicit-def $vgpr30_hi16, implicit-def $vgpr31, implicit-def $vgpr31_lo16, implicit-def $vgpr31_hi16, implicit-def $vgpr32, implicit-def $vgpr32_lo16, implicit-def $vgpr32_hi16, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec { + ; GFX1200-NEXT: S_CLAUSE 31 + ; GFX1200-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 8, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr3 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 12, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr4 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 16, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr5 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 20, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr6 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 24, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr7 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 28, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr8 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 32, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr9 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 36, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr10 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 40, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr11 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 44, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr12 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 48, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr13 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 52, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr14 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 56, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr15 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 60, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr16 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 64, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr17 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, 
$sgpr0_sgpr1_sgpr2_sgpr3, 0, 68, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr18 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 72, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr19 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 76, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr20 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 80, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr21 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 84, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr22 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 88, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr23 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 92, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr24 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 96, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr25 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 100, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr26 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 104, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr27 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 108, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr28 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 112, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr29 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 116, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr30 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 120, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr31 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 124, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr32 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 128, 0, 0, implicit $exec + ; GFX1200-NEXT: } + ; GFX1200-NEXT: BUNDLE implicit-def $vgpr33, implicit-def $vgpr33_lo16, implicit-def $vgpr33_hi16, implicit-def $vgpr34, implicit-def $vgpr34_lo16, implicit-def $vgpr34_hi16, implicit-def $vgpr35, implicit-def $vgpr35_lo16, implicit-def $vgpr35_hi16, implicit-def $vgpr36, implicit-def $vgpr36_lo16, implicit-def $vgpr36_hi16, implicit-def $vgpr37, implicit-def $vgpr37_lo16, implicit-def $vgpr37_hi16, implicit-def $vgpr38, implicit-def $vgpr38_lo16, implicit-def $vgpr38_hi16, implicit-def $vgpr39, implicit-def $vgpr39_lo16, implicit-def $vgpr39_hi16, implicit-def $vgpr40, implicit-def $vgpr40_lo16, implicit-def $vgpr40_hi16, implicit-def $vgpr41, implicit-def $vgpr41_lo16, implicit-def $vgpr41_hi16, implicit-def $vgpr42, implicit-def $vgpr42_lo16, implicit-def $vgpr42_hi16, implicit-def $vgpr43, implicit-def $vgpr43_lo16, implicit-def $vgpr43_hi16, implicit-def $vgpr44, implicit-def $vgpr44_lo16, implicit-def $vgpr44_hi16, implicit-def $vgpr45, implicit-def $vgpr45_lo16, implicit-def $vgpr45_hi16, implicit-def $vgpr46, implicit-def $vgpr46_lo16, implicit-def $vgpr46_hi16, implicit-def $vgpr47, implicit-def $vgpr47_lo16, implicit-def $vgpr47_hi16, implicit-def $vgpr48, implicit-def $vgpr48_lo16, implicit-def $vgpr48_hi16, implicit-def $vgpr49, implicit-def $vgpr49_lo16, implicit-def $vgpr49_hi16, implicit-def $vgpr50, implicit-def $vgpr50_lo16, implicit-def $vgpr50_hi16, implicit-def $vgpr51, implicit-def $vgpr51_lo16, implicit-def $vgpr51_hi16, implicit-def $vgpr52, implicit-def $vgpr52_lo16, implicit-def $vgpr52_hi16, implicit-def $vgpr53, implicit-def $vgpr53_lo16, implicit-def $vgpr53_hi16, implicit-def $vgpr54, implicit-def $vgpr54_lo16, implicit-def $vgpr54_hi16, implicit-def $vgpr55, implicit-def $vgpr55_lo16, implicit-def 
$vgpr55_hi16, implicit-def $vgpr56, implicit-def $vgpr56_lo16, implicit-def $vgpr56_hi16, implicit-def $vgpr57, implicit-def $vgpr57_lo16, implicit-def $vgpr57_hi16, implicit-def $vgpr58, implicit-def $vgpr58_lo16, implicit-def $vgpr58_hi16, implicit-def $vgpr59, implicit-def $vgpr59_lo16, implicit-def $vgpr59_hi16, implicit-def $vgpr60, implicit-def $vgpr60_lo16, implicit-def $vgpr60_hi16, implicit-def $vgpr61, implicit-def $vgpr61_lo16, implicit-def $vgpr61_hi16, implicit-def $vgpr62, implicit-def $vgpr62_lo16, implicit-def $vgpr62_hi16, implicit-def $vgpr63, implicit-def $vgpr63_lo16, implicit-def $vgpr63_hi16, implicit-def $vgpr64, implicit-def $vgpr64_lo16, implicit-def $vgpr64_hi16, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec { + ; GFX1200-NEXT: S_CLAUSE 31 + ; GFX1200-NEXT: $vgpr33 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 132, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr34 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 136, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr35 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 140, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr36 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 144, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr37 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 148, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr38 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 152, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr39 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 156, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr40 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 160, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr41 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 164, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr42 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 168, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr43 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 172, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr44 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 176, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr45 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 180, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr46 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 184, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr47 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 188, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr48 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 192, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr49 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 196, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr50 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 200, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr51 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 204, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr52 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 208, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr53 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 212, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr54 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 216, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr55 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 220, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr56 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 224, 0, 0, implicit 
$exec + ; GFX1200-NEXT: $vgpr57 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 228, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr58 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 232, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr59 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 236, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr60 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 240, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr61 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 244, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr62 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 248, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr63 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 252, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr64 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 256, 0, 0, implicit $exec + ; GFX1200-NEXT: } + ; GFX1200-NEXT: BUNDLE implicit-def $vgpr65, implicit-def $vgpr65_lo16, implicit-def $vgpr65_hi16, implicit-def $vgpr66, implicit-def $vgpr66_lo16, implicit-def $vgpr66_hi16, implicit-def $vgpr67, implicit-def $vgpr67_lo16, implicit-def $vgpr67_hi16, implicit-def $vgpr68, implicit-def $vgpr68_lo16, implicit-def $vgpr68_hi16, implicit-def $vgpr69, implicit-def $vgpr69_lo16, implicit-def $vgpr69_hi16, implicit-def $vgpr70, implicit-def $vgpr70_lo16, implicit-def $vgpr70_hi16, implicit-def $vgpr71, implicit-def $vgpr71_lo16, implicit-def $vgpr71_hi16, implicit-def $vgpr72, implicit-def $vgpr72_lo16, implicit-def $vgpr72_hi16, implicit-def $vgpr73, implicit-def $vgpr73_lo16, implicit-def $vgpr73_hi16, implicit-def $vgpr74, implicit-def $vgpr74_lo16, implicit-def $vgpr74_hi16, implicit-def $vgpr75, implicit-def $vgpr75_lo16, implicit-def $vgpr75_hi16, implicit-def $vgpr76, implicit-def $vgpr76_lo16, implicit-def $vgpr76_hi16, implicit-def $vgpr77, implicit-def $vgpr77_lo16, implicit-def $vgpr77_hi16, implicit-def $vgpr78, implicit-def $vgpr78_lo16, implicit-def $vgpr78_hi16, implicit-def $vgpr79, implicit-def $vgpr79_lo16, implicit-def $vgpr79_hi16, implicit-def $vgpr80, implicit-def $vgpr80_lo16, implicit-def $vgpr80_hi16, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec { + ; GFX1200-NEXT: S_CLAUSE 15 + ; GFX1200-NEXT: $vgpr65 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 260, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr66 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 264, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr67 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 268, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr68 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 272, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr69 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 276, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr70 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 280, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr71 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 284, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr72 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 288, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr73 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 292, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr74 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 296, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr75 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 300, 0, 0, implicit $exec + ; 
GFX1200-NEXT: $vgpr76 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 304, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr77 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 308, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr78 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 312, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr79 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 316, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr80 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 320, 0, 0, implicit $exec + ; GFX1200-NEXT: } + ; + ; GFX1250-LABEL: name: long_clause + ; GFX1250: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUNDLE implicit-def $vgpr1, implicit-def $vgpr1_lo16, implicit-def $vgpr1_hi16, implicit-def $vgpr2, implicit-def $vgpr2_lo16, implicit-def $vgpr2_hi16, implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit-def $vgpr4, implicit-def $vgpr4_lo16, implicit-def $vgpr4_hi16, implicit-def $vgpr5, implicit-def $vgpr5_lo16, implicit-def $vgpr5_hi16, implicit-def $vgpr6, implicit-def $vgpr6_lo16, implicit-def $vgpr6_hi16, implicit-def $vgpr7, implicit-def $vgpr7_lo16, implicit-def $vgpr7_hi16, implicit-def $vgpr8, implicit-def $vgpr8_lo16, implicit-def $vgpr8_hi16, implicit-def $vgpr9, implicit-def $vgpr9_lo16, implicit-def $vgpr9_hi16, implicit-def $vgpr10, implicit-def $vgpr10_lo16, implicit-def $vgpr10_hi16, implicit-def $vgpr11, implicit-def $vgpr11_lo16, implicit-def $vgpr11_hi16, implicit-def $vgpr12, implicit-def $vgpr12_lo16, implicit-def $vgpr12_hi16, implicit-def $vgpr13, implicit-def $vgpr13_lo16, implicit-def $vgpr13_hi16, implicit-def $vgpr14, implicit-def $vgpr14_lo16, implicit-def $vgpr14_hi16, implicit-def $vgpr15, implicit-def $vgpr15_lo16, implicit-def $vgpr15_hi16, implicit-def $vgpr16, implicit-def $vgpr16_lo16, implicit-def $vgpr16_hi16, implicit-def $vgpr17, implicit-def $vgpr17_lo16, implicit-def $vgpr17_hi16, implicit-def $vgpr18, implicit-def $vgpr18_lo16, implicit-def $vgpr18_hi16, implicit-def $vgpr19, implicit-def $vgpr19_lo16, implicit-def $vgpr19_hi16, implicit-def $vgpr20, implicit-def $vgpr20_lo16, implicit-def $vgpr20_hi16, implicit-def $vgpr21, implicit-def $vgpr21_lo16, implicit-def $vgpr21_hi16, implicit-def $vgpr22, implicit-def $vgpr22_lo16, implicit-def $vgpr22_hi16, implicit-def $vgpr23, implicit-def $vgpr23_lo16, implicit-def $vgpr23_hi16, implicit-def $vgpr24, implicit-def $vgpr24_lo16, implicit-def $vgpr24_hi16, implicit-def $vgpr25, implicit-def $vgpr25_lo16, implicit-def $vgpr25_hi16, implicit-def $vgpr26, implicit-def $vgpr26_lo16, implicit-def $vgpr26_hi16, implicit-def $vgpr27, implicit-def $vgpr27_lo16, implicit-def $vgpr27_hi16, implicit-def $vgpr28, implicit-def $vgpr28_lo16, implicit-def $vgpr28_hi16, implicit-def $vgpr29, implicit-def $vgpr29_lo16, implicit-def $vgpr29_hi16, implicit-def $vgpr30, implicit-def $vgpr30_lo16, implicit-def $vgpr30_hi16, implicit-def $vgpr31, implicit-def $vgpr31_lo16, implicit-def $vgpr31_hi16, implicit-def $vgpr32, implicit-def $vgpr32_lo16, implicit-def $vgpr32_hi16, implicit-def $vgpr33, implicit-def $vgpr33_lo16, implicit-def $vgpr33_hi16, implicit-def $vgpr34, implicit-def $vgpr34_lo16, implicit-def $vgpr34_hi16, implicit-def $vgpr35, implicit-def $vgpr35_lo16, implicit-def $vgpr35_hi16, implicit-def $vgpr36, implicit-def $vgpr36_lo16, implicit-def $vgpr36_hi16, implicit-def $vgpr37, implicit-def $vgpr37_lo16, implicit-def $vgpr37_hi16, implicit-def $vgpr38, 
implicit-def $vgpr38_lo16, implicit-def $vgpr38_hi16, implicit-def $vgpr39, implicit-def $vgpr39_lo16, implicit-def $vgpr39_hi16, implicit-def $vgpr40, implicit-def $vgpr40_lo16, implicit-def $vgpr40_hi16, implicit-def $vgpr41, implicit-def $vgpr41_lo16, implicit-def $vgpr41_hi16, implicit-def $vgpr42, implicit-def $vgpr42_lo16, implicit-def $vgpr42_hi16, implicit-def $vgpr43, implicit-def $vgpr43_lo16, implicit-def $vgpr43_hi16, implicit-def $vgpr44, implicit-def $vgpr44_lo16, implicit-def $vgpr44_hi16, implicit-def $vgpr45, implicit-def $vgpr45_lo16, implicit-def $vgpr45_hi16, implicit-def $vgpr46, implicit-def $vgpr46_lo16, implicit-def $vgpr46_hi16, implicit-def $vgpr47, implicit-def $vgpr47_lo16, implicit-def $vgpr47_hi16, implicit-def $vgpr48, implicit-def $vgpr48_lo16, implicit-def $vgpr48_hi16, implicit-def $vgpr49, implicit-def $vgpr49_lo16, implicit-def $vgpr49_hi16, implicit-def $vgpr50, implicit-def $vgpr50_lo16, implicit-def $vgpr50_hi16, implicit-def $vgpr51, implicit-def $vgpr51_lo16, implicit-def $vgpr51_hi16, implicit-def $vgpr52, implicit-def $vgpr52_lo16, implicit-def $vgpr52_hi16, implicit-def $vgpr53, implicit-def $vgpr53_lo16, implicit-def $vgpr53_hi16, implicit-def $vgpr54, implicit-def $vgpr54_lo16, implicit-def $vgpr54_hi16, implicit-def $vgpr55, implicit-def $vgpr55_lo16, implicit-def $vgpr55_hi16, implicit-def $vgpr56, implicit-def $vgpr56_lo16, implicit-def $vgpr56_hi16, implicit-def $vgpr57, implicit-def $vgpr57_lo16, implicit-def $vgpr57_hi16, implicit-def $vgpr58, implicit-def $vgpr58_lo16, implicit-def $vgpr58_hi16, implicit-def $vgpr59, implicit-def $vgpr59_lo16, implicit-def $vgpr59_hi16, implicit-def $vgpr60, implicit-def $vgpr60_lo16, implicit-def $vgpr60_hi16, implicit-def $vgpr61, implicit-def $vgpr61_lo16, implicit-def $vgpr61_hi16, implicit-def $vgpr62, implicit-def $vgpr62_lo16, implicit-def $vgpr62_hi16, implicit-def $vgpr63, implicit-def $vgpr63_lo16, implicit-def $vgpr63_hi16, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec { + ; GFX1250-NEXT: S_CLAUSE 62 + ; GFX1250-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 8, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr3 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 12, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr4 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 16, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr5 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 20, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr6 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 24, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr7 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 28, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr8 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 32, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr9 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 36, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr10 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 40, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr11 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 44, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr12 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 48, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr13 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 52, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr14 = 
BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 56, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr15 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 60, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr16 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 64, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr17 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 68, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr18 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 72, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr19 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 76, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr20 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 80, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr21 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 84, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr22 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 88, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr23 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 92, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr24 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 96, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr25 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 100, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr26 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 104, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr27 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 108, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr28 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 112, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr29 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 116, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr30 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 120, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr31 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 124, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr32 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 128, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr33 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 132, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr34 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 136, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr35 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 140, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr36 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 144, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr37 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 148, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr38 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 152, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr39 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 156, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr40 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 160, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr41 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 164, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr42 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 168, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr43 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 172, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr44 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 176, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr45 = 
BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 180, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr46 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 184, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr47 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 188, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr48 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 192, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr49 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 196, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr50 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 200, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr51 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 204, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr52 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 208, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr53 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 212, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr54 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 216, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr55 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 220, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr56 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 224, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr57 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 228, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr58 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 232, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr59 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 236, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr60 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 240, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr61 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 244, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr62 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 248, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr63 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 252, 0, 0, implicit $exec + ; GFX1250-NEXT: } + ; GFX1250-NEXT: BUNDLE implicit-def $vgpr64, implicit-def $vgpr64_lo16, implicit-def $vgpr64_hi16, implicit-def $vgpr65, implicit-def $vgpr65_lo16, implicit-def $vgpr65_hi16, implicit-def $vgpr66, implicit-def $vgpr66_lo16, implicit-def $vgpr66_hi16, implicit-def $vgpr67, implicit-def $vgpr67_lo16, implicit-def $vgpr67_hi16, implicit-def $vgpr68, implicit-def $vgpr68_lo16, implicit-def $vgpr68_hi16, implicit-def $vgpr69, implicit-def $vgpr69_lo16, implicit-def $vgpr69_hi16, implicit-def $vgpr70, implicit-def $vgpr70_lo16, implicit-def $vgpr70_hi16, implicit-def $vgpr71, implicit-def $vgpr71_lo16, implicit-def $vgpr71_hi16, implicit-def $vgpr72, implicit-def $vgpr72_lo16, implicit-def $vgpr72_hi16, implicit-def $vgpr73, implicit-def $vgpr73_lo16, implicit-def $vgpr73_hi16, implicit-def $vgpr74, implicit-def $vgpr74_lo16, implicit-def $vgpr74_hi16, implicit-def $vgpr75, implicit-def $vgpr75_lo16, implicit-def $vgpr75_hi16, implicit-def $vgpr76, implicit-def $vgpr76_lo16, implicit-def $vgpr76_hi16, implicit-def $vgpr77, implicit-def $vgpr77_lo16, implicit-def $vgpr77_hi16, implicit-def $vgpr78, implicit-def $vgpr78_lo16, implicit-def $vgpr78_hi16, implicit-def $vgpr79, implicit-def $vgpr79_lo16, implicit-def $vgpr79_hi16, implicit-def $vgpr80, implicit-def $vgpr80_lo16, implicit-def $vgpr80_hi16, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit 
$exec { + ; GFX1250-NEXT: S_CLAUSE 16 + ; GFX1250-NEXT: $vgpr64 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 256, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr65 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 260, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr66 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 264, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr67 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 268, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr68 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 272, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr69 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 276, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr70 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 280, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr71 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 284, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr72 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 288, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr73 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 292, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr74 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 296, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr75 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 300, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr76 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 304, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr77 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 308, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr78 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 312, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr79 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 316, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr80 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 320, 0, 0, implicit $exec + ; GFX1250-NEXT: } + $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec + $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 8, 0, 0, implicit $exec + $vgpr3 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 12, 0, 0, implicit $exec + $vgpr4 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 16, 0, 0, implicit $exec + $vgpr5 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 20, 0, 0, implicit $exec + $vgpr6 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 24, 0, 0, implicit $exec + $vgpr7 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 28, 0, 0, implicit $exec + $vgpr8 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 32, 0, 0, implicit $exec + $vgpr9 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 36, 0, 0, implicit $exec + $vgpr10 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 40, 0, 0, implicit $exec + $vgpr11 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 44, 0, 0, implicit $exec + $vgpr12 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 48, 0, 0, implicit $exec + $vgpr13 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 52, 0, 0, implicit $exec + $vgpr14 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 56, 0, 0, implicit $exec + $vgpr15 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 60, 0, 0, implicit $exec + $vgpr16 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 64, 0, 0, 
implicit $exec + $vgpr17 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 68, 0, 0, implicit $exec + $vgpr18 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 72, 0, 0, implicit $exec + $vgpr19 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 76, 0, 0, implicit $exec + $vgpr20 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 80, 0, 0, implicit $exec + $vgpr21 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 84, 0, 0, implicit $exec + $vgpr22 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 88, 0, 0, implicit $exec + $vgpr23 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 92, 0, 0, implicit $exec + $vgpr24 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 96, 0, 0, implicit $exec + $vgpr25 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 100, 0, 0, implicit $exec + $vgpr26 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 104, 0, 0, implicit $exec + $vgpr27 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 108, 0, 0, implicit $exec + $vgpr28 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 112, 0, 0, implicit $exec + $vgpr29 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 116, 0, 0, implicit $exec + $vgpr30 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 120, 0, 0, implicit $exec + $vgpr31 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 124, 0, 0, implicit $exec + $vgpr32 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 128, 0, 0, implicit $exec + $vgpr33 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 132, 0, 0, implicit $exec + $vgpr34 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 136, 0, 0, implicit $exec + $vgpr35 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 140, 0, 0, implicit $exec + $vgpr36 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 144, 0, 0, implicit $exec + $vgpr37 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 148, 0, 0, implicit $exec + $vgpr38 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 152, 0, 0, implicit $exec + $vgpr39 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 156, 0, 0, implicit $exec + $vgpr40 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 160, 0, 0, implicit $exec + $vgpr41 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 164, 0, 0, implicit $exec + $vgpr42 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 168, 0, 0, implicit $exec + $vgpr43 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 172, 0, 0, implicit $exec + $vgpr44 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 176, 0, 0, implicit $exec + $vgpr45 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 180, 0, 0, implicit $exec + $vgpr46 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 184, 0, 0, implicit $exec + $vgpr47 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 188, 0, 0, implicit $exec + $vgpr48 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 192, 0, 0, implicit $exec + $vgpr49 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 196, 0, 0, implicit $exec + $vgpr50 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 200, 0, 0, implicit $exec + $vgpr51 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 204, 0, 0, implicit $exec + $vgpr52 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 208, 0, 
0, implicit $exec + $vgpr53 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 212, 0, 0, implicit $exec + $vgpr54 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 216, 0, 0, implicit $exec + $vgpr55 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 220, 0, 0, implicit $exec + $vgpr56 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 224, 0, 0, implicit $exec + $vgpr57 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 228, 0, 0, implicit $exec + $vgpr58 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 232, 0, 0, implicit $exec + $vgpr59 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 236, 0, 0, implicit $exec + $vgpr60 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 240, 0, 0, implicit $exec + $vgpr61 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 244, 0, 0, implicit $exec + $vgpr62 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 248, 0, 0, implicit $exec + $vgpr63 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 252, 0, 0, implicit $exec + $vgpr64 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 256, 0, 0, implicit $exec + $vgpr65 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 260, 0, 0, implicit $exec + $vgpr66 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 264, 0, 0, implicit $exec + $vgpr67 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 268, 0, 0, implicit $exec + $vgpr68 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 272, 0, 0, implicit $exec + $vgpr69 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 276, 0, 0, implicit $exec + $vgpr70 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 280, 0, 0, implicit $exec + $vgpr71 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 284, 0, 0, implicit $exec + $vgpr72 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 288, 0, 0, implicit $exec + $vgpr73 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 292, 0, 0, implicit $exec + $vgpr74 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 296, 0, 0, implicit $exec + $vgpr75 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 300, 0, 0, implicit $exec + $vgpr76 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 304, 0, 0, implicit $exec + $vgpr77 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 308, 0, 0, implicit $exec + $vgpr78 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 312, 0, 0, implicit $exec + $vgpr79 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 316, 0, 0, implicit $exec + $vgpr80 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 320, 0, 0, implicit $exec +... + +--- +name: kill +tracksRegLiveness: true +body: | + bb.0: + liveins: $sgpr0_sgpr1, $sgpr4 + ; GFX12-LABEL: name: kill + ; GFX12: liveins: $sgpr0_sgpr1, $sgpr4 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: BUNDLE implicit-def $sgpr2, implicit-def $sgpr2_lo16, implicit-def $sgpr2_hi16, implicit-def $sgpr3, implicit-def $sgpr3_lo16, implicit-def $sgpr3_hi16, implicit $sgpr0_sgpr1, implicit undef $sgpr4 { + ; GFX12-NEXT: S_CLAUSE 1 + ; GFX12-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 + ; GFX12-NEXT: KILL undef renamable $sgpr4 + ; GFX12-NEXT: $sgpr3 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 4, 0 + ; GFX12-NEXT: } + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 + KILL undef renamable $sgpr4 + $sgpr3 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 4, 0 +... 
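A note on the encoding visible in these checks: the S_CLAUSE immediate is the clause length minus one, so the S_CLAUSE 1 in the kill test covers its two S_LOAD_DWORD_IMM instructions, just as the S_CLAUSE 15 and S_CLAUSE 62 earlier in this test cover sixteen and sixty-three buffer loads. Meta-instructions that land inside the bundle, such as the KILL here, do not count toward that length. A minimal sketch of the shape in the same MIR notation (registers mirror the kill test above; the full subregister implicit-def list is elided for brevity):

    BUNDLE implicit-def $sgpr2, implicit-def $sgpr3, implicit $sgpr0_sgpr1 {
      S_CLAUSE 1
      ; clause member 0
      $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
      ; KILL is a meta-instruction: bundled, but not counted by S_CLAUSE
      KILL undef renamable $sgpr4
      ; clause member 1
      $sgpr3 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 4, 0
    }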
+ +--- +name: kill2 +tracksRegLiveness: true +body: | + bb.0: + liveins: $sgpr0_sgpr1, $sgpr4, $sgpr5 + ; GFX12-LABEL: name: kill2 + ; GFX12: liveins: $sgpr0_sgpr1, $sgpr4, $sgpr5 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: BUNDLE implicit-def $sgpr2, implicit-def $sgpr2_lo16, implicit-def $sgpr2_hi16, implicit-def $sgpr3, implicit-def $sgpr3_lo16, implicit-def $sgpr3_hi16, implicit $sgpr0_sgpr1, implicit undef $sgpr4 { + ; GFX12-NEXT: S_CLAUSE 1 + ; GFX12-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 + ; GFX12-NEXT: KILL undef renamable $sgpr4 + ; GFX12-NEXT: $sgpr3 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 4, 0 + ; GFX12-NEXT: } + ; GFX12-NEXT: KILL undef renamable $sgpr5 + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0 + KILL undef renamable $sgpr4 + $sgpr3 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 4, 0 + KILL undef renamable $sgpr5 +... + +--- +name: flat_load_atomic +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1200-LABEL: name: flat_load_atomic + ; GFX1200: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + ; GFX1200-NEXT: $vgpr4 = FLAT_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr + ; + ; GFX1250-LABEL: name: flat_load_atomic + ; GFX1250: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUNDLE implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit-def $vgpr4, implicit-def $vgpr4_lo16, implicit-def $vgpr4_hi16, implicit $vgpr0_vgpr1, implicit $exec, implicit $flat_scr, implicit $vgpr2 { + ; GFX1250-NEXT: S_CLAUSE 1 + ; GFX1250-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + ; GFX1250-NEXT: $vgpr4 = FLAT_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr + ; GFX1250-NEXT: } + $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + $vgpr4 = FLAT_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr +... + +--- +name: global_load_atomic +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1200-LABEL: name: global_load_atomic + ; GFX1200: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr4 = GLOBAL_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec + ; + ; GFX1250-LABEL: name: global_load_atomic + ; GFX1250: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUNDLE implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit-def $vgpr4, implicit-def $vgpr4_lo16, implicit-def $vgpr4_hi16, implicit $vgpr0_vgpr1, implicit $exec, implicit $vgpr2 { + ; GFX1250-NEXT: S_CLAUSE 1 + ; GFX1250-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr4 = GLOBAL_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec + ; GFX1250-NEXT: } + $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec + $vgpr4 = GLOBAL_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec +... 
+ +--- +name: flat_global_load +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0_vgpr1 + ; GFX12-LABEL: name: flat_global_load + ; GFX12: liveins: $vgpr0_vgpr1 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + ; GFX12-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 4, 0, implicit $exec, implicit $flat_scr + $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 4, 0, implicit $exec, implicit $flat_scr +... + +--- +name: buffer_load_atomic +tracksRegLiveness: true +body: | + bb.0: + liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0 + ; GFX1200-LABEL: name: buffer_load_atomic + ; GFX1200: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec + ; GFX1200-NEXT: $vgpr0 = BUFFER_ATOMIC_ADD_OFFSET_RTN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 4, 0, 0, implicit $exec + ; + ; GFX1250-LABEL: name: buffer_load_atomic + ; GFX1250: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUNDLE implicit-def $vgpr1, implicit-def $vgpr1_lo16, implicit-def $vgpr1_hi16, implicit-def $vgpr0, implicit-def $vgpr0_lo16, implicit-def $vgpr0_hi16, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4, implicit $exec, implicit $vgpr0 { + ; GFX1250-NEXT: S_CLAUSE 1 + ; GFX1250-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: $vgpr0 = BUFFER_ATOMIC_ADD_OFFSET_RTN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 4, 0, 0, implicit $exec + ; GFX1250-NEXT: } + $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec + $vgpr0 = BUFFER_ATOMIC_ADD_OFFSET_RTN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 4, 0, 0, implicit $exec +... + +--- +name: flat_load_store +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1200-LABEL: name: flat_load_store + ; GFX1200: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + ; GFX1200-NEXT: FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr + ; + ; GFX1250-LABEL: name: flat_load_store + ; GFX1250: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUNDLE implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit $vgpr0_vgpr1, implicit $exec, implicit $flat_scr, implicit $vgpr2 { + ; GFX1250-NEXT: S_CLAUSE 1 + ; GFX1250-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + ; GFX1250-NEXT: FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr + ; GFX1250-NEXT: } + $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr +... 
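The paired prefixes in this stretch document a target split: GFX1250 forms a two-instruction clause from a load followed by a returning atomic (flat_load_atomic, global_load_atomic) or by a store to the same address space (flat_load_store), while GFX1200 leaves those pairs unbundled; mixed FLAT/GLOBAL pairs stay unbundled on both (flat_global_load). When clause formation changes, these check bodies are regenerated rather than edited by hand. A typical invocation, assuming the in-tree update script with its usual --llc-binary option and a build directory named build (the concrete .mir filename is not visible in this hunk, so a placeholder is used):

    # Regenerate the autogenerated check lines for this MIR test.
    llvm/utils/update_mir_test_checks.py --llc-binary build/bin/llc \
        llvm/test/CodeGen/AMDGPU/<this-test>.mir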
+ +--- +name: global_load_store +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1200-LABEL: name: global_load_store + ; GFX1200: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec + ; GFX1200-NEXT: GLOBAL_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec + ; + ; GFX1250-LABEL: name: global_load_store + ; GFX1250: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUNDLE implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit $vgpr0_vgpr1, implicit $exec, implicit $vgpr2 { + ; GFX1250-NEXT: S_CLAUSE 1 + ; GFX1250-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec + ; GFX1250-NEXT: GLOBAL_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec + ; GFX1250-NEXT: } + $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec + GLOBAL_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec +... + +--- +name: buffer_load_store +tracksRegLiveness: true +body: | + bb.0: + liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0 + ; GFX1200-LABEL: name: buffer_load_store + ; GFX1200: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0 + ; GFX1200-NEXT: {{ $}} + ; GFX1200-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec + ; GFX1200-NEXT: BUFFER_STORE_DWORD_OFFSET $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec + ; + ; GFX1250-LABEL: name: buffer_load_store + ; GFX1250: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0 + ; GFX1250-NEXT: {{ $}} + ; GFX1250-NEXT: BUNDLE implicit-def $vgpr1, implicit-def $vgpr1_lo16, implicit-def $vgpr1_hi16, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4, implicit $exec, implicit $vgpr0 { + ; GFX1250-NEXT: S_CLAUSE 1 + ; GFX1250-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec + ; GFX1250-NEXT: BUFFER_STORE_DWORD_OFFSET $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec + ; GFX1250-NEXT: } + $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec + BUFFER_STORE_DWORD_OFFSET $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec +... + +--- +name: flat_load_global_load +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX12-LABEL: name: flat_load_global_load + ; GFX12: liveins: $vgpr0_vgpr1, $vgpr2 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + ; GFX12-NEXT: $vgpr4 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec + $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + $vgpr4 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec +... + +--- +name: global_load_buffer_store +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4 + ; GFX12-LABEL: name: global_load_buffer_store + ; GFX12: liveins: $vgpr0_vgpr1, $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: $vgpr4 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec + ; GFX12-NEXT: BUFFER_STORE_DWORD_OFFSET $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec + $vgpr4 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec + BUFFER_STORE_DWORD_OFFSET $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec +... 
--- name: flat_prefetch_flat_load @@ -31,3 +532,106 @@ body: | GLOBAL_PREFETCH_B8 $vgpr0_vgpr1, 0, 0, implicit $exec $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr ... + +--- +name: async_load_async_store +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0, $vgpr1, $vgpr2 + ; GFX12-LABEL: name: async_load_async_store + ; GFX12: liveins: $vgpr0, $vgpr1, $vgpr2 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: BUNDLE implicit-def $asynccnt, implicit $vgpr2, implicit $vgpr0_vgpr1, implicit $exec, implicit $asynccnt { + ; GFX12-NEXT: S_CLAUSE 1 + ; GFX12-NEXT: GLOBAL_LOAD_ASYNC_TO_LDS_B32 $vgpr2, $vgpr0_vgpr1, 0, 0, implicit-def $asynccnt, implicit $exec, implicit $asynccnt + ; GFX12-NEXT: GLOBAL_STORE_ASYNC_FROM_LDS_B32 $vgpr0_vgpr1, $vgpr2, 32, 0, implicit-def $asynccnt, implicit $exec, implicit internal $asynccnt + ; GFX12-NEXT: } + GLOBAL_LOAD_ASYNC_TO_LDS_B32 $vgpr2, $vgpr0_vgpr1, 0, 0, implicit-def $asynccnt, implicit $exec, implicit $asynccnt + GLOBAL_STORE_ASYNC_FROM_LDS_B32 $vgpr0_vgpr1, $vgpr2, 32, 0, implicit-def $asynccnt, implicit $exec, implicit $asynccnt +... + +--- +name: async_load_ds_load_tr +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0, $vgpr1, $vgpr2 + ; GFX12-LABEL: name: async_load_ds_load_tr + ; GFX12: liveins: $vgpr0, $vgpr1, $vgpr2 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: GLOBAL_LOAD_ASYNC_TO_LDS_B32 $vgpr2, $vgpr0_vgpr1, 0, 0, implicit-def $asynccnt, implicit $exec, implicit $asynccnt + ; GFX12-NEXT: $vgpr0_vgpr1 = DS_LOAD_TR8_B64 $vgpr2, 8, 0, implicit $exec + GLOBAL_LOAD_ASYNC_TO_LDS_B32 $vgpr2, $vgpr0_vgpr1, 0, 0, implicit-def $asynccnt, implicit $exec, implicit $asynccnt + $vgpr0_vgpr1 = DS_LOAD_TR8_B64 $vgpr2, 8, 0, implicit $exec +... + +--- +name: ds_load_trs_ds_load +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + ; GFX12-LABEL: name: ds_load_trs_ds_load + ; GFX12: liveins: $vgpr0 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: $vgpr4_vgpr5 = DS_LOAD_TR8_B64 $vgpr0, 0, 0, implicit $exec + ; GFX12-NEXT: $vgpr0_vgpr1 = DS_LOAD_TR8_B64 $vgpr0, 8, 0, implicit $exec + ; GFX12-NEXT: $vgpr2_vgpr3 = DS_READ_B64_gfx9 $vgpr0, 16, 0, implicit $exec + $vgpr4_vgpr5 = DS_LOAD_TR8_B64 $vgpr0, 0, 0, implicit $exec + $vgpr0_vgpr1 = DS_LOAD_TR8_B64 $vgpr0, 8, 0, implicit $exec + $vgpr2_vgpr3 = DS_READ_B64_gfx9 $vgpr0, 16, 0, implicit $exec +... + +# Make sure we do not clause DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 with anything +--- +name: ds_atomic_async_barrier_arrive_b64_ds_read +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + ; GFX12-LABEL: name: ds_atomic_async_barrier_arrive_b64_ds_read + ; GFX12: liveins: $vgpr0, $vgpr1 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: $vgpr2 = DS_READ_B32_gfx9 $vgpr0, 0, 0, implicit $exec + ; GFX12-NEXT: DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 $vgpr1, 0, 0, implicit-def $asynccnt, implicit $asynccnt, implicit $exec + ; GFX12-NEXT: $vgpr3 = DS_READ_B32_gfx9 $vgpr0, 16, 0, implicit $exec + $vgpr2 = DS_READ_B32_gfx9 $vgpr0, 0, 0, implicit $exec + DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 $vgpr1, 0, 0, implicit-def $asynccnt, implicit $asynccnt, implicit $exec + $vgpr3 = DS_READ_B32_gfx9 $vgpr0, 16, 0, implicit $exec +... 
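The negative tests here (async_load_ds_load_tr, ds_load_trs_ds_load, and the ds_atomic_async_barrier_arrive_b64_* pair) encode "no clause formed" purely through check shape: the GFX12-NEXT lines pin the output instructions back-to-back with no enclosing BUNDLE { ... } or S_CLAUSE, so any clause the pass inserted would break the -NEXT chain and fail the test. (In the positive async test just above, note the implicit internal $asynccnt operand: the internal marker is how MIR prints a use of a value re-defined earlier within the same bundle.) The same FileCheck idiom in miniature, with an arbitrary prefix and instructions taken from the test above:

    ; CHECK:      $vgpr2 = DS_READ_B32_gfx9 $vgpr0, 0, 0, implicit $exec
    ; CHECK-NEXT: DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 $vgpr1, 0, 0, implicit-def $asynccnt, implicit $asynccnt, implicit $exec
    ; CHECK-NEXT: $vgpr3 = DS_READ_B32_gfx9 $vgpr0, 16, 0, implicit $exec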
+ +--- +name: ds_atomic_async_barrier_arrive_b64_flat_load +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + ; GFX12-LABEL: name: ds_atomic_async_barrier_arrive_b64_flat_load + ; GFX12: liveins: $vgpr0, $vgpr1 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + ; GFX12-NEXT: DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 $vgpr1, 0, 0, implicit-def $asynccnt, implicit $asynccnt, implicit $exec + ; GFX12-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 16, 0, implicit $exec, implicit $flat_scr + $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 $vgpr1, 0, 0, implicit-def $asynccnt, implicit $asynccnt, implicit $exec + $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 16, 0, implicit $exec, implicit $flat_scr +... + +--- +name: global_load_switching_scope +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0_vgpr1 + ; GFX12-LABEL: name: global_load_switching_scope + ; GFX12: liveins: $vgpr0_vgpr1 + ; GFX12-NEXT: {{ $}} + ; GFX12-NEXT: BUNDLE implicit-def $vgpr2, implicit-def $vgpr2_lo16, implicit-def $vgpr2_hi16, implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit $vgpr0_vgpr1, implicit $exec, implicit $flat_scr { + ; GFX12-NEXT: S_CLAUSE 1 + ; GFX12-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + ; GFX12-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 4, 24, implicit $exec, implicit $flat_scr + ; GFX12-NEXT: } + $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr + $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 4, 24, implicit $exec, implicit $flat_scr +... diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll index 7a20b5c..a2c1545 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll @@ -1,27 +1,52 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=CHECK,CHECK-SDAG-TRUE16 -; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=CHECK,CHECK-FAKE16 -; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL -; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX11,GFX11-SDAG-TRUE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL-TRUE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL 
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX12,GFX12-SDAG-TRUE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX12,GFX12-GISEL-TRUE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16 define amdgpu_kernel void @raw_atomic_buffer_load_i32(<4 x i32> %addr) { -; CHECK-LABEL: raw_atomic_buffer_load_i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB0_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB0_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_atomic_buffer_load_i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB0_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB0_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_atomic_buffer_load_i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB0_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB0_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -34,23 +59,42 @@ bb2: } define amdgpu_kernel void @raw_atomic_buffer_load_i32_off(<4 x i32> %addr) { -; CHECK-LABEL: raw_atomic_buffer_load_i32_off: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB1_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB1_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; 
GFX11-LABEL: raw_atomic_buffer_load_i32_off: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB1_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB1_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_atomic_buffer_load_i32_off: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB1_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB1_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -62,23 +106,43 @@ bb2: ret void } define amdgpu_kernel void @raw_atomic_buffer_load_i32_soff(<4 x i32> %addr) { -; CHECK-LABEL: raw_atomic_buffer_load_i32_soff: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB2_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 4 offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB2_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_atomic_buffer_load_i32_soff: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB2_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 4 offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB2_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_atomic_buffer_load_i32_soff: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_mov_b32 s5, 4 +; GFX12-NEXT: .LBB2_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], s5 offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: 
s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB2_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -90,23 +154,42 @@ bb2: ret void } define amdgpu_kernel void @raw_atomic_buffer_load_i32_dlc(<4 x i32> %addr) { -; CHECK-LABEL: raw_atomic_buffer_load_i32_dlc: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB3_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 dlc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB3_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_atomic_buffer_load_i32_dlc: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB3_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 dlc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB3_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_atomic_buffer_load_i32_dlc: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB3_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null offset:4 th:TH_LOAD_NT_RT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB3_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -119,24 +202,44 @@ bb2: } define amdgpu_kernel void @raw_nonatomic_buffer_load_i32(<4 x i32> %addr) { -; CHECK-LABEL: raw_nonatomic_buffer_load_i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 glc -; CHECK-NEXT: s_mov_b32 s0, 0 -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: .LBB4_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_and_b32 s1, exec_lo, vcc_lo -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; CHECK-NEXT: s_or_b32 s0, s1, s0 -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; CHECK-NEXT: s_cbranch_execnz .LBB4_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_nonatomic_buffer_load_i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; 
GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 glc +; GFX11-NEXT: s_mov_b32 s0, 0 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: .LBB4_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_and_b32 s1, exec_lo, vcc_lo +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: s_or_b32 s0, s1, s0 +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX11-NEXT: s_cbranch_execnz .LBB4_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_nonatomic_buffer_load_i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s0, 0 +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: .LBB4_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_and_b32 s1, exec_lo, vcc_lo +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX12-NEXT: s_or_b32 s0, s1, s0 +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX12-NEXT: s_cbranch_execnz .LBB4_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -149,23 +252,43 @@ bb2: } define amdgpu_kernel void @raw_atomic_buffer_load_i64(<4 x i32> %addr) { -; CHECK-LABEL: raw_atomic_buffer_load_i64: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB5_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b64 v[2:3], off, s[0:3], 0 offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1] -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB5_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_atomic_buffer_load_i64: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB5_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b64 v[2:3], off, s[0:3], 0 offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1] +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB5_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_atomic_buffer_load_i64: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: v_mov_b32_e32 v1, 0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB5_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-NEXT: 
s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1] +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB5_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() %id.zext = zext i32 %id to i64 @@ -179,23 +302,42 @@ bb2: } define amdgpu_kernel void @raw_atomic_buffer_load_v2i16(<4 x i32> %addr) { -; CHECK-LABEL: raw_atomic_buffer_load_v2i16: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB6_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB6_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_atomic_buffer_load_v2i16: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB6_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB6_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_atomic_buffer_load_v2i16: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB6_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB6_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -209,68 +351,151 @@ bb2: } define amdgpu_kernel void @raw_atomic_buffer_load_v4i16(<4 x i32> %addr) { -; CHECK-SDAG-TRUE16-LABEL: raw_atomic_buffer_load_v4i16: -; CHECK-SDAG-TRUE16: ; %bb.0: ; %bb -; CHECK-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 -; CHECK-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1 -; CHECK-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-SDAG-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc -; CHECK-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) -; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; CHECK-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1 -; CHECK-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; 
CHECK-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1 -; CHECK-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 -; CHECK-SDAG-TRUE16-NEXT: s_endpgm +; GFX11-SDAG-TRUE16-LABEL: raw_atomic_buffer_load_v4i16: +; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb +; GFX11-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1 +; GFX11-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc +; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1 +; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX11-SDAG-TRUE16-NEXT: s_endpgm ; -; CHECK-FAKE16-LABEL: raw_atomic_buffer_load_v4i16: -; CHECK-FAKE16: ; %bb.0: ; %bb -; CHECK-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-FAKE16-NEXT: s_mov_b32 s4, 0 -; CHECK-FAKE16-NEXT: .LBB7_1: ; %bb1 -; CHECK-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-FAKE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc -; CHECK-FAKE16-NEXT: s_waitcnt vmcnt(0) -; CHECK-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; CHECK-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; CHECK-FAKE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1 -; CHECK-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-FAKE16-NEXT: s_cbranch_execnz .LBB7_1 -; CHECK-FAKE16-NEXT: ; %bb.2: ; %bb2 -; CHECK-FAKE16-NEXT: s_endpgm +; GFX11-FAKE16-LABEL: raw_atomic_buffer_load_v4i16: +; GFX11-FAKE16: ; %bb.0: ; %bb +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0 +; GFX11-FAKE16-NEXT: .LBB7_1: ; %bb1 +; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1 +; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX11-FAKE16-NEXT: ; %bb.2: ; %bb2 +; GFX11-FAKE16-NEXT: s_endpgm ; -; CHECK-GISEL-LABEL: raw_atomic_buffer_load_v4i16: -; CHECK-GISEL: ; %bb.0: ; %bb -; CHECK-GISEL-NEXT: s_load_b128 s[0:3], 
s[4:5], 0x24 -; CHECK-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-GISEL-NEXT: s_mov_b32 s4, 0 -; CHECK-GISEL-NEXT: .LBB7_1: ; %bb1 -; CHECK-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-GISEL-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc -; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0) -; CHECK-GISEL-NEXT: v_readfirstlane_b32 s5, v1 -; CHECK-GISEL-NEXT: v_readfirstlane_b32 s6, v2 -; CHECK-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6 -; CHECK-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) -; CHECK-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0 -; CHECK-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-GISEL-NEXT: s_cbranch_execnz .LBB7_1 -; CHECK-GISEL-NEXT: ; %bb.2: ; %bb2 -; CHECK-GISEL-NEXT: s_endpgm +; GFX11-GISEL-TRUE16-LABEL: raw_atomic_buffer_load_v4i16: +; GFX11-GISEL-TRUE16: ; %bb.0: ; %bb +; GFX11-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-GISEL-TRUE16-NEXT: .LBB7_1: ; %bb1 +; GFX11-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-GISEL-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-GISEL-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc +; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-GISEL-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l +; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX11-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX11-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX11-GISEL-TRUE16-NEXT: s_endpgm +; +; GFX11-GISEL-LABEL: raw_atomic_buffer_load_v4i16: +; GFX11-GISEL: ; %bb.0: ; %bb +; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-GISEL-NEXT: s_mov_b32 s4, 0 +; GFX11-GISEL-NEXT: .LBB7_1: ; %bb1 +; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-GISEL-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc +; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) +; GFX11-GISEL-NEXT: v_readfirstlane_b32 s5, v1 +; GFX11-GISEL-NEXT: v_readfirstlane_b32 s6, v2 +; GFX11-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6 +; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX11-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0 +; GFX11-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB7_1 +; GFX11-GISEL-NEXT: ; %bb.2: ; %bb2 +; GFX11-GISEL-NEXT: s_endpgm +; +; GFX12-SDAG-TRUE16-LABEL: raw_atomic_buffer_load_v4i16: +; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb +; GFX12-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-SDAG-TRUE16-NEXT: s_wait_xcnt 0x0 +; GFX12-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX12-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1 +; GFX12-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0 +; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0 +; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; GFX12-SDAG-TRUE16-NEXT: s_delay_alu 
instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1 +; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX12-SDAG-TRUE16-NEXT: s_endpgm +; +; GFX12-FAKE16-LABEL: raw_atomic_buffer_load_v4i16: +; GFX12-FAKE16: ; %bb.0: ; %bb +; GFX12-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-FAKE16-NEXT: s_wait_xcnt 0x0 +; GFX12-FAKE16-NEXT: s_mov_b32 s4, 0 +; GFX12-FAKE16-NEXT: .LBB7_1: ; %bb1 +; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0 +; GFX12-FAKE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1 +; GFX12-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX12-FAKE16-NEXT: ; %bb.2: ; %bb2 +; GFX12-FAKE16-NEXT: s_endpgm +; +; GFX12-GISEL-TRUE16-LABEL: raw_atomic_buffer_load_v4i16: +; GFX12-GISEL-TRUE16: ; %bb.0: ; %bb +; GFX12-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-GISEL-TRUE16-NEXT: s_wait_xcnt 0x0 +; GFX12-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX12-GISEL-TRUE16-NEXT: .LBB7_1: ; %bb1 +; GFX12-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-GISEL-TRUE16-NEXT: s_wait_kmcnt 0x0 +; GFX12-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-GISEL-TRUE16-NEXT: s_wait_loadcnt 0x0 +; GFX12-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l +; GFX12-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX12-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX12-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX12-GISEL-TRUE16-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -285,23 +510,42 @@ bb2: } define amdgpu_kernel void @raw_atomic_buffer_load_v4i32(<4 x i32> %addr) { -; CHECK-LABEL: raw_atomic_buffer_load_v4i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB8_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b128 v[1:4], off, s[0:3], 0 offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v4, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB8_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_atomic_buffer_load_v4i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: 
s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB8_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b128 v[1:4], off, s[0:3], 0 offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v4, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB8_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_atomic_buffer_load_v4i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB8_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b128 v[2:5], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB8_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -315,25 +559,46 @@ bb2: } define amdgpu_kernel void @raw_atomic_buffer_load_ptr(<4 x i32> %addr) { -; CHECK-LABEL: raw_atomic_buffer_load_ptr: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB9_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: flat_load_b32 v1, v[1:2] -; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB9_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_atomic_buffer_load_ptr: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB9_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: flat_load_b32 v1, v[1:2] +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB9_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_atomic_buffer_load_ptr: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB9_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: flat_load_b32 v1, 
v[2:3] +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB9_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.atomic.fadd.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.atomic.fadd.ll index 5c0e34c..d51e912 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.atomic.fadd.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.atomic.fadd.ll @@ -1,58 +1,95 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx908 < %s | FileCheck -check-prefix=CHECK %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX9 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX12 %s define void @raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) { -; CHECK-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: -; CHECK: ; %bb.0: -; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CHECK-NEXT: buffer_atomic_add_f32 v0, v1, s[16:19], s20 offen offset:24 -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: buffer_atomic_add_f32 v0, v1, s[16:19], s20 offen offset:24 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_add_nc_u32_e32 v1, 24, v1 +; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen +; GFX12-NEXT: s_set_pc_i64 s[30:31] %voffset.add = add i32 %voffset, 24 %ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0) ret void } define void @raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 inreg %soffset) { -; CHECK-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: -; CHECK: ; %bb.0: -; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CHECK-NEXT: buffer_atomic_add_f32 v0, off, s[16:19], s20 -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: buffer_atomic_add_f32 v0, off, s[16:19], s20 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16 +; GFX12-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 0, i32 %soffset, i32 0) ret void } define void 
@raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<2 x half> %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) { -; CHECK-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: -; CHECK: ; %bb.0: -; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CHECK-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[16:19], s20 offen -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[16:19], s20 offen +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen +; GFX12-NEXT: s_set_pc_i64 s[30:31] %ret = call <2 x half> @llvm.amdgcn.raw.buffer.atomic.fadd.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } define void @raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset(<2 x half> %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) { -; CHECK-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: -; CHECK: ; %bb.0: -; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CHECK-NEXT: buffer_atomic_pk_add_f16 v0, off, s[16:19], s20 offset:92 -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: buffer_atomic_pk_add_f16 v0, off, s[16:19], s20 offset:92 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92 +; GFX12-NEXT: s_set_pc_i64 s[30:31] %ret = call <2 x half> @llvm.amdgcn.raw.buffer.atomic.fadd.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 92, i32 %soffset, i32 0) ret void } define void @raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) { -; CHECK-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: -; CHECK: ; %bb.0: -; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; CHECK-NEXT: buffer_atomic_add_f32 v0, v1, s[16:19], s20 offen slc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: buffer_atomic_add_f32 v0, v1, s[16:19], s20 offen slc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT +; GFX12-NEXT: s_set_pc_i64 s[30:31] 
%ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 2) ret void } diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll index 8a6594f..1a1a1f7 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll @@ -6,6 +6,7 @@ ; RUN: llc -mcpu=gfx1010 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefixes=GFX910,GFX10 ; RUN: llc -mcpu=gfx1100 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX11 ; RUN: llc -mcpu=gfx1200 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX12 +; RUN: llc -mcpu=gfx1250 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX12 define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) { ; GFX67-LABEL: raw_buffer_load_i8_tfe: diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll index 89511de..eeea1456 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll @@ -3,6 +3,7 @@ ; RUN: llc < %s -mtriple=amdgcn -mcpu=tonga | FileCheck -check-prefixes=GFX68,GFX8 %s ; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 | FileCheck -check-prefixes=GFX11 %s ; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1200 | FileCheck -check-prefixes=GFX12 %s +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 | FileCheck -check-prefixes=GFX12 %s define amdgpu_ps void @buffer_store(<4 x i32> inreg, <4 x float>, <4 x float>, <4 x float>) { ; GFX68-LABEL: buffer_store: diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll index 561ec7d..6f7c001 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll @@ -1,27 +1,52 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=CHECK,CHECK-SDAG-TRUE16 -; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=CHECK,CHECK-FAKE16 -; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL -; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX11,GFX11-SDAG-TRUE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL-TRUE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s 
-check-prefixes=GFX11,GFX11-GISEL +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX12,GFX12-SDAG-TRUE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX12,GFX12-GISEL-TRUE16 +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16 define amdgpu_kernel void @raw_ptr_atomic_buffer_ptr_load_i32(ptr addrspace(8) %ptr) { -; CHECK-LABEL: raw_ptr_atomic_buffer_ptr_load_i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB0_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB0_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_ptr_atomic_buffer_ptr_load_i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB0_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB0_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_ptr_atomic_buffer_ptr_load_i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB0_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB0_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -34,23 +59,42 @@ bb2: } define amdgpu_kernel void @raw_ptr_atomic_buffer_load_i32_off(ptr addrspace(8) %ptr) { -; CHECK-LABEL: raw_ptr_atomic_buffer_load_i32_off: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB1_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: 
s_cbranch_execnz .LBB1_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_ptr_atomic_buffer_load_i32_off: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB1_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB1_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_ptr_atomic_buffer_load_i32_off: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB1_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB1_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -62,23 +106,43 @@ bb2: ret void } define amdgpu_kernel void @raw_ptr_atomic_buffer_load_i32_soff(ptr addrspace(8) %ptr) { -; CHECK-LABEL: raw_ptr_atomic_buffer_load_i32_soff: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB2_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 4 offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB2_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_ptr_atomic_buffer_load_i32_soff: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB2_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 4 offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB2_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_ptr_atomic_buffer_load_i32_soff: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_mov_b32 s5, 4 +; GFX12-NEXT: .LBB2_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], s5 offset:4 th:TH_LOAD_NT +; GFX12-NEXT: 
s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB2_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -90,23 +154,42 @@ bb2: ret void } define amdgpu_kernel void @raw_ptr_atomic_buffer_load_i32_dlc(ptr addrspace(8) %ptr) { -; CHECK-LABEL: raw_ptr_atomic_buffer_load_i32_dlc: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB3_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 dlc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB3_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_ptr_atomic_buffer_load_i32_dlc: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB3_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 dlc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB3_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_ptr_atomic_buffer_load_i32_dlc: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB3_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null offset:4 th:TH_LOAD_NT_RT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB3_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -119,24 +202,44 @@ bb2: } define amdgpu_kernel void @raw_nonptr_atomic_buffer_load_i32(ptr addrspace(8) %ptr) { -; CHECK-LABEL: raw_nonptr_atomic_buffer_load_i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 glc -; CHECK-NEXT: s_mov_b32 s0, 0 -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: .LBB4_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_and_b32 s1, exec_lo, vcc_lo -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; CHECK-NEXT: s_or_b32 s0, s1, s0 -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; CHECK-NEXT: s_cbranch_execnz .LBB4_1 -; CHECK-NEXT: ; %bb.2: ; 
%bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_nonptr_atomic_buffer_load_i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 glc +; GFX11-NEXT: s_mov_b32 s0, 0 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: .LBB4_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_and_b32 s1, exec_lo, vcc_lo +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: s_or_b32 s0, s1, s0 +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX11-NEXT: s_cbranch_execnz .LBB4_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_nonptr_atomic_buffer_load_i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s0, 0 +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: .LBB4_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_and_b32 s1, exec_lo, vcc_lo +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX12-NEXT: s_or_b32 s0, s1, s0 +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX12-NEXT: s_cbranch_execnz .LBB4_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -149,23 +252,43 @@ bb2: } define amdgpu_kernel void @raw_ptr_atomic_buffer_load_i64(ptr addrspace(8) %ptr) { -; CHECK-LABEL: raw_ptr_atomic_buffer_load_i64: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB5_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b64 v[2:3], off, s[0:3], 0 offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1] -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB5_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_ptr_atomic_buffer_load_i64: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB5_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b64 v[2:3], off, s[0:3], 0 offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1] +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB5_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_ptr_atomic_buffer_load_i64: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: v_mov_b32_e32 v1, 0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB5_1: ; %bb1 
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1] +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB5_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() %id.zext = zext i32 %id to i64 @@ -179,23 +302,42 @@ bb2: } define amdgpu_kernel void @raw_ptr_atomic_buffer_load_v2i16(ptr addrspace(8) %ptr) { -; CHECK-LABEL: raw_ptr_atomic_buffer_load_v2i16: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB6_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB6_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_ptr_atomic_buffer_load_v2i16: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB6_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB6_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_ptr_atomic_buffer_load_v2i16: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB6_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB6_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -209,68 +351,151 @@ bb2: } define amdgpu_kernel void @raw_ptr_atomic_buffer_load_v4i16(ptr addrspace(8) %ptr) { -; CHECK-SDAG-TRUE16-LABEL: raw_ptr_atomic_buffer_load_v4i16: -; CHECK-SDAG-TRUE16: ; %bb.0: ; %bb -; CHECK-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 -; CHECK-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1 -; CHECK-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-SDAG-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc -; CHECK-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) -; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 
-; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; CHECK-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1 -; CHECK-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1 -; CHECK-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 -; CHECK-SDAG-TRUE16-NEXT: s_endpgm +; GFX11-SDAG-TRUE16-LABEL: raw_ptr_atomic_buffer_load_v4i16: +; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb +; GFX11-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1 +; GFX11-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc +; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1 +; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX11-SDAG-TRUE16-NEXT: s_endpgm ; -; CHECK-FAKE16-LABEL: raw_ptr_atomic_buffer_load_v4i16: -; CHECK-FAKE16: ; %bb.0: ; %bb -; CHECK-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-FAKE16-NEXT: s_mov_b32 s4, 0 -; CHECK-FAKE16-NEXT: .LBB7_1: ; %bb1 -; CHECK-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-FAKE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc -; CHECK-FAKE16-NEXT: s_waitcnt vmcnt(0) -; CHECK-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 -; CHECK-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; CHECK-FAKE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1 -; CHECK-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-FAKE16-NEXT: s_cbranch_execnz .LBB7_1 -; CHECK-FAKE16-NEXT: ; %bb.2: ; %bb2 -; CHECK-FAKE16-NEXT: s_endpgm +; GFX11-FAKE16-LABEL: raw_ptr_atomic_buffer_load_v4i16: +; GFX11-FAKE16: ; %bb.0: ; %bb +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0 +; GFX11-FAKE16-NEXT: .LBB7_1: ; %bb1 +; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1 +; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; 
GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX11-FAKE16-NEXT: ; %bb.2: ; %bb2 +; GFX11-FAKE16-NEXT: s_endpgm ; -; CHECK-GISEL-LABEL: raw_ptr_atomic_buffer_load_v4i16: -; CHECK-GISEL: ; %bb.0: ; %bb -; CHECK-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-GISEL-NEXT: s_mov_b32 s4, 0 -; CHECK-GISEL-NEXT: .LBB7_1: ; %bb1 -; CHECK-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-GISEL-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc -; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0) -; CHECK-GISEL-NEXT: v_readfirstlane_b32 s5, v1 -; CHECK-GISEL-NEXT: v_readfirstlane_b32 s6, v2 -; CHECK-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6 -; CHECK-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) -; CHECK-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0 -; CHECK-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-GISEL-NEXT: s_cbranch_execnz .LBB7_1 -; CHECK-GISEL-NEXT: ; %bb.2: ; %bb2 -; CHECK-GISEL-NEXT: s_endpgm +; GFX11-GISEL-TRUE16-LABEL: raw_ptr_atomic_buffer_load_v4i16: +; GFX11-GISEL-TRUE16: ; %bb.0: ; %bb +; GFX11-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-GISEL-TRUE16-NEXT: .LBB7_1: ; %bb1 +; GFX11-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-GISEL-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-GISEL-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc +; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-GISEL-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l +; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX11-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX11-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX11-GISEL-TRUE16-NEXT: s_endpgm +; +; GFX11-GISEL-LABEL: raw_ptr_atomic_buffer_load_v4i16: +; GFX11-GISEL: ; %bb.0: ; %bb +; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-GISEL-NEXT: s_mov_b32 s4, 0 +; GFX11-GISEL-NEXT: .LBB7_1: ; %bb1 +; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-GISEL-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc +; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) +; GFX11-GISEL-NEXT: v_readfirstlane_b32 s5, v1 +; GFX11-GISEL-NEXT: v_readfirstlane_b32 s6, v2 +; GFX11-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6 +; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX11-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0 +; GFX11-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB7_1 +; GFX11-GISEL-NEXT: ; %bb.2: ; %bb2 +; GFX11-GISEL-NEXT: s_endpgm +; +; GFX12-SDAG-TRUE16-LABEL: raw_ptr_atomic_buffer_load_v4i16: +; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb +; GFX12-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-SDAG-TRUE16-NEXT: s_wait_xcnt 0x0 +; GFX12-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX12-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1 +; GFX12-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 
0x0 +; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0 +; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1 +; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX12-SDAG-TRUE16-NEXT: s_endpgm +; +; GFX12-FAKE16-LABEL: raw_ptr_atomic_buffer_load_v4i16: +; GFX12-FAKE16: ; %bb.0: ; %bb +; GFX12-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-FAKE16-NEXT: s_wait_xcnt 0x0 +; GFX12-FAKE16-NEXT: s_mov_b32 s4, 0 +; GFX12-FAKE16-NEXT: .LBB7_1: ; %bb1 +; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0 +; GFX12-FAKE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v2 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1 +; GFX12-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX12-FAKE16-NEXT: ; %bb.2: ; %bb2 +; GFX12-FAKE16-NEXT: s_endpgm +; +; GFX12-GISEL-TRUE16-LABEL: raw_ptr_atomic_buffer_load_v4i16: +; GFX12-GISEL-TRUE16: ; %bb.0: ; %bb +; GFX12-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-GISEL-TRUE16-NEXT: s_wait_xcnt 0x0 +; GFX12-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX12-GISEL-TRUE16-NEXT: .LBB7_1: ; %bb1 +; GFX12-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-GISEL-TRUE16-NEXT: s_wait_kmcnt 0x0 +; GFX12-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-GISEL-TRUE16-NEXT: s_wait_loadcnt 0x0 +; GFX12-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l +; GFX12-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX12-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB7_1 +; GFX12-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX12-GISEL-TRUE16-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -285,23 +510,42 @@ bb2: } define amdgpu_kernel void @raw_ptr_atomic_buffer_load_v4i32(ptr addrspace(8) %ptr) { -; CHECK-LABEL: raw_ptr_atomic_buffer_load_v4i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB8_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b128 v[1:4], off, s[0:3], 0 offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v4, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: 
s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB8_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_ptr_atomic_buffer_load_v4i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB8_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b128 v[1:4], off, s[0:3], 0 offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v4, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB8_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_ptr_atomic_buffer_load_v4i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB8_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b128 v[2:5], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB8_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -315,25 +559,46 @@ bb2: } define amdgpu_kernel void @raw_ptr_atomic_buffer_load_ptr(ptr addrspace(8) %ptr) { -; CHECK-LABEL: raw_ptr_atomic_buffer_load_ptr: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB9_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: flat_load_b32 v1, v[1:2] -; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB9_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: raw_ptr_atomic_buffer_load_ptr: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB9_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: flat_load_b32 v1, v[1:2] +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB9_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: raw_ptr_atomic_buffer_load_ptr: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 
0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB9_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: flat_load_b32 v1, v[2:3] +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB9_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16.ll index 8b6ba1a..2c3b521 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16.ll @@ -1,104 +1,174 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 ; FIXME: Test 90a, 940. 908 should fail to select. -; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s define <2 x bfloat> @raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset(<2 x bfloat> %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 { -; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen offset:128 th:TH_ATOMIC_RETURN -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen offset:128 th:TH_ATOMIC_RETURN +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_add_nc_u32_e32 v1, 0x80, v1 +; GFX1250-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %voffset.add = add i32 %voffset, 128 %ret = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 %voffset.add, i32 %soffset, i32 0) ret <2 x bfloat> %ret } define <2 x bfloat> @raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc(<2 x bfloat> %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 { -; GFX12-LABEL: 
raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT_RETURN -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT_RETURN +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 92, i32 %soffset, i32 2) ret <2 x bfloat> %ret } define void @raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset(<2 x bfloat> %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 { -; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen offset:128 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen offset:128 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_add_nc_u32_e32 v1, 0x80, v1 +; GFX1250-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %voffset.add = add i32 %voffset, 128 %unused = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 %voffset.add, i32 %soffset, i32 0) ret void } define void @raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc(<2 x bfloat> %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 { -; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; 
GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %unused = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 92, i32 %soffset, i32 2) ret void } ; Test waterfall loop define <2 x bfloat> @raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset_add__vgpr_soffset(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset) #0 { -; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset_add__vgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: s_mov_b32 s2, exec_lo -; GFX12-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1 -; GFX12-NEXT: v_readfirstlane_b32 s4, v1 -; GFX12-NEXT: v_readfirstlane_b32 s5, v2 -; GFX12-NEXT: v_readfirstlane_b32 s6, v3 -; GFX12-NEXT: v_readfirstlane_b32 s7, v4 -; GFX12-NEXT: v_readfirstlane_b32 s3, v6 -; GFX12-NEXT: s_wait_alu 0xf1ff -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4] -; GFX12-NEXT: v_cmp_eq_u32_e64 s1, s3, v6 -; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: s_and_b32 s0, s0, s1 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: s_and_saveexec_b32 s0, s0 -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: buffer_atomic_pk_add_bf16 v0, v5, s[4:7], s3 offen offset:128 th:TH_ATOMIC_RETURN -; GFX12-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4 -; GFX12-NEXT: ; implicit-def: $vgpr6 -; GFX12-NEXT: ; implicit-def: $vgpr5 -; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 -; GFX12-NEXT: s_cbranch_execnz .LBB4_1 -; GFX12-NEXT: ; %bb.2: -; GFX12-NEXT: s_mov_b32 exec_lo, s2 -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset_add__vgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: s_mov_b32 s2, exec_lo +; GFX1200-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1 +; GFX1200-NEXT: v_readfirstlane_b32 s4, v1 +; GFX1200-NEXT: v_readfirstlane_b32 s5, v2 +; GFX1200-NEXT: v_readfirstlane_b32 s6, v3 +; GFX1200-NEXT: v_readfirstlane_b32 s7, v4 +; GFX1200-NEXT: v_readfirstlane_b32 s3, v6 +; GFX1200-NEXT: s_wait_alu 0xf1ff +; GFX1200-NEXT: v_cmp_eq_u64_e32 vcc_lo, 
s[4:5], v[1:2] +; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1200-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4] +; GFX1200-NEXT: v_cmp_eq_u32_e64 s1, s3, v6 +; GFX1200-NEXT: s_and_b32 s0, vcc_lo, s0 +; GFX1200-NEXT: s_wait_alu 0xfffe +; GFX1200-NEXT: s_and_b32 s0, s0, s1 +; GFX1200-NEXT: s_wait_alu 0xfffe +; GFX1200-NEXT: s_and_saveexec_b32 s0, s0 +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: buffer_atomic_pk_add_bf16 v0, v5, s[4:7], s3 offen offset:128 th:TH_ATOMIC_RETURN +; GFX1200-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4 +; GFX1200-NEXT: ; implicit-def: $vgpr6 +; GFX1200-NEXT: ; implicit-def: $vgpr5 +; GFX1200-NEXT: s_xor_b32 exec_lo, exec_lo, s0 +; GFX1200-NEXT: s_cbranch_execnz .LBB4_1 +; GFX1200-NEXT: ; %bb.2: +; GFX1200-NEXT: s_mov_b32 exec_lo, s2 +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset_add__vgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v11, v4 :: v_dual_mov_b32 v10, v3 +; GFX1250-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v8, v1 +; GFX1250-NEXT: v_add_nc_u32_e32 v1, 0x80, v5 +; GFX1250-NEXT: s_mov_b32 s2, exec_lo +; GFX1250-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_readfirstlane_b32 s4, v8 +; GFX1250-NEXT: v_readfirstlane_b32 s5, v9 +; GFX1250-NEXT: v_readfirstlane_b32 s6, v10 +; GFX1250-NEXT: v_readfirstlane_b32 s7, v11 +; GFX1250-NEXT: v_readfirstlane_b32 s3, v6 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[8:9] +; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[10:11] +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v6 +; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_and_b32 s0, s0, s1 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_and_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[4:7], s3 offen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9_vgpr10_vgpr11 +; GFX1250-NEXT: ; implicit-def: $vgpr6 +; GFX1250-NEXT: ; implicit-def: $vgpr1 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB4_1 +; GFX1250-NEXT: ; %bb.2: +; GFX1250-NEXT: s_mov_b32 exec_lo, s2 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %voffset.add = add i32 %voffset, 128 %ret = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 %voffset.add, i32 %soffset, i32 0) ret <2 x bfloat> %ret diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_nortn.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_nortn.ll index 8141e0d..ea8f836 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_nortn.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_nortn.ll @@ -2,7 +2,8 @@ ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX908 %s ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx942 < %s | 
FileCheck -check-prefix=GFX942 %s -; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s define void @raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 { ; GFX908-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: @@ -26,15 +27,22 @@ define void @raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voff ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen scope:SCOPE_SYS -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen scope:SCOPE_SYS +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen scope:SCOPE_SYS +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 24) ret void } @@ -61,15 +69,22 @@ define void @raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 0, i32 %soffset, i32 0) ret void } @@ -96,15 +111,22 @@ define void 
@raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_vo ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call <2 x half> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 0) ret void } @@ -131,15 +153,22 @@ define void @raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffs ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call <2 x half> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 92, i32 %soffset, i32 0) ret void } @@ -166,15 +195,22 @@ define void @raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voff ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: 
raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 2) ret void } diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_rtn.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_rtn.ll index 767117d..2838740 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_rtn.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_rtn.ll @@ -1,7 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s -; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s define float @raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 { ; GFX90A-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: @@ -18,16 +19,24 @@ define float @raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffs ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN scope:SCOPE_SYS -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN scope:SCOPE_SYS +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: 
s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 24) ret float %ret } @@ -47,16 +56,24 @@ define float @raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffset_ ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16 th:TH_ATOMIC_RETURN -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16 th:TH_ATOMIC_RETURN +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16 th:TH_ATOMIC_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 0, i32 %soffset, i32 0) ret float %ret } @@ -76,16 +93,24 @@ define <2 x half> @raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__vgp ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call <2 x half> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 0) ret <2 x half> %ret } @@ -105,16 +130,24 @@ define <2 x half> @raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__0_v ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] ; -; 
GFX12-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_RETURN -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_RETURN +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call <2 x half> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 92, i32 %soffset, i32 0) ret <2 x half> %ret } @@ -134,16 +167,24 @@ define float @raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffs ; GFX942-NEXT: s_waitcnt vmcnt(0) ; GFX942-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT_RETURN -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT_RETURN +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT_RETURN +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 2) ret float %ret } diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.bf16.ll index 3540468..4dd258b 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.bf16.ll @@ -3,7 +3,8 @@ ; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck --check-prefix=GFX8 %s ; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck 
--check-prefix=GFX9 %s ; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck --check-prefix=GFX10 %s -; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefixes=GFX11 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefix=GFX11 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefix=GFX12 %s define bfloat @raw_ptr_buffer_load_bf16(ptr addrspace(8) inreg %rsrc) { ; GFX7-LABEL: raw_ptr_buffer_load_bf16: @@ -41,6 +42,14 @@ define bfloat @raw_ptr_buffer_load_bf16(ptr addrspace(8) inreg %rsrc) { ; GFX11-NEXT: buffer_load_u16 v0, off, s[0:3], 0 ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: raw_ptr_buffer_load_bf16: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_u16 v0, off, s[0:3], null +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: s_set_pc_i64 s[30:31] %val = call bfloat @llvm.amdgcn.raw.ptr.buffer.load.v2bf16(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0) ret bfloat %val } @@ -82,6 +91,14 @@ define <2 x bfloat> @raw_ptr_buffer_load_v2bf16(ptr addrspace(8) inreg %rsrc) { ; GFX11-NEXT: buffer_load_b32 v0, off, s[0:3], 0 ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: raw_ptr_buffer_load_v2bf16: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v0, off, s[0:3], null +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: s_set_pc_i64 s[30:31] %val = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.load.v2bf16(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0) ret <2 x bfloat> %val } @@ -125,6 +142,14 @@ define <4 x bfloat> @raw_ptr_buffer_load_v4bf16(ptr addrspace(8) inreg %rsrc) { ; GFX11-NEXT: buffer_load_b64 v[0:1], off, s[0:3], 0 ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: raw_ptr_buffer_load_v4bf16: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b64 v[0:1], off, s[0:3], null +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: s_set_pc_i64 s[30:31] %val = call <4 x bfloat> @llvm.amdgcn.raw.ptr.buffer.load.v4bf16(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0) ret <4 x bfloat> %val } @@ -178,6 +203,14 @@ define <8 x bfloat> @raw_ptr_buffer_load_v8bf16(ptr addrspace(8) inreg %rsrc) { ; GFX11-NEXT: buffer_load_b128 v[0:3], off, s[0:3], 0 ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_setpc_b64 s[30:31] +; +; GFX12-LABEL: raw_ptr_buffer_load_v8bf16: +; GFX12: ; %bb.0: +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b128 v[0:3], off, s[0:3], null +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: s_set_pc_i64 s[30:31] %val = call <8 x bfloat> @llvm.amdgcn.raw.ptr.buffer.load.v8bf16(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0) ret <8 x bfloat> %val } diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.store.bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.store.bf16.ll index e1f84dc..ec7d7d4 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.store.bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.store.bf16.ll @@ -3,7 +3,8 @@ ; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck --check-prefix=GFX8 %s ; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck --check-prefix=GFX9 %s ; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck 
--check-prefix=GFX10 %s -; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefixes=GFX11 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefix=GFX11 %s +; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefix=GFX12 %s define amdgpu_ps void @buffer_store_bf16(ptr addrspace(8) inreg %rsrc, bfloat %data, i32 %offset) { ; GFX7-LABEL: buffer_store_bf16: @@ -32,6 +33,11 @@ define amdgpu_ps void @buffer_store_bf16(ptr addrspace(8) inreg %rsrc, bfloat %d ; GFX11: ; %bb.0: ; GFX11-NEXT: buffer_store_b16 v0, v1, s[0:3], 0 offen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_bf16: +; GFX12: ; %bb.0: +; GFX12-NEXT: buffer_store_b16 v0, v1, s[0:3], null offen +; GFX12-NEXT: s_endpgm call void @llvm.amdgcn.raw.ptr.buffer.store.bf16(bfloat %data, ptr addrspace(8) %rsrc, i32 %offset, i32 0, i32 0) ret void } @@ -65,6 +71,11 @@ define amdgpu_ps void @buffer_store_v2bf16(ptr addrspace(8) inreg %rsrc, <2 x bf ; GFX11: ; %bb.0: ; GFX11-NEXT: buffer_store_b32 v0, v1, s[0:3], 0 offen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_v2bf16: +; GFX12: ; %bb.0: +; GFX12-NEXT: buffer_store_b32 v0, v1, s[0:3], null offen +; GFX12-NEXT: s_endpgm call void @llvm.amdgcn.raw.ptr.buffer.store.v2bf16(<2 x bfloat> %data, ptr addrspace(8) %rsrc, i32 %offset, i32 0, i32 0) ret void } @@ -102,6 +113,11 @@ define amdgpu_ps void @buffer_store_v4bf16(ptr addrspace(8) inreg %rsrc, <4 x bf ; GFX11: ; %bb.0: ; GFX11-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], 0 offen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_v4bf16: +; GFX12: ; %bb.0: +; GFX12-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], null offen +; GFX12-NEXT: s_endpgm call void @llvm.amdgcn.raw.ptr.buffer.store.v4bf16(<4 x bfloat> %data, ptr addrspace(8) %rsrc, i32 %offset, i32 0, i32 0) ret void } @@ -153,6 +169,11 @@ define amdgpu_ps void @buffer_store_v8bf16(ptr addrspace(8) inreg %rsrc, <8 x bf ; GFX11: ; %bb.0: ; GFX11-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], 0 offen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_v8bf16: +; GFX12: ; %bb.0: +; GFX12-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], null offen +; GFX12-NEXT: s_endpgm call void @llvm.amdgcn.raw.ptr.buffer.store.v8bf16(<8 x bfloat> %data, ptr addrspace(8) %rsrc, i32 %offset, i32 0, i32 0) ret void } diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll index f6f614e..8896364 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll @@ -1,30 +1,58 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-SDAG-TRUE16 -; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-FAKE16 -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-SDAG-TRUE16 +; RUN: llc 
-global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16 +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL-TRUE16 +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16 +; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL +; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-SDAG-TRUE16 +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16 +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-GISEL-TRUE16 +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16 define amdgpu_kernel void @struct_atomic_buffer_load_i32(<4 x i32> %addr, i32 %index) { -; CHECK-LABEL: struct_atomic_buffer_load_i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB0_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB0_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_atomic_buffer_load_i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB0_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB0_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_atomic_buffer_load_i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB0_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; 
GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB0_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -37,23 +65,43 @@ bb2: } define amdgpu_kernel void @struct_atomic_buffer_load_i32_const_idx(<4 x i32> %addr) { -; CHECK-LABEL: struct_atomic_buffer_load_i32_const_idx: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB1_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB1_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_atomic_buffer_load_i32_const_idx: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB1_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB1_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_atomic_buffer_load_i32_const_idx: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: v_mov_b32_e32 v1, 15 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB1_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB1_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -66,26 +114,48 @@ bb2: } define amdgpu_kernel void @struct_atomic_buffer_load_i32_off(<4 x i32> %addr, i32 %index) { -; CHECK-LABEL: struct_atomic_buffer_load_i32_off: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB2_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB2_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; 
CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_atomic_buffer_load_i32_off: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB2_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB2_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_atomic_buffer_load_i32_off: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB2_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB2_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -98,26 +168,49 @@ bb2: } define amdgpu_kernel void @struct_atomic_buffer_load_i32_soff(<4 x i32> %addr, i32 %index) { -; CHECK-LABEL: struct_atomic_buffer_load_i32_soff: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB3_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 4 idxen offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB3_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_atomic_buffer_load_i32_soff: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB3_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 4 idxen offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB3_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_atomic_buffer_load_i32_soff: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: 
s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_mov_b32 s5, 4 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB3_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], s5 idxen offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB3_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -129,26 +222,48 @@ bb2: ret void } define amdgpu_kernel void @struct_atomic_buffer_load_i32_dlc(<4 x i32> %addr, i32 %index) { -; CHECK-LABEL: struct_atomic_buffer_load_i32_dlc: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB4_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen offset:4 dlc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB4_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_atomic_buffer_load_i32_dlc: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB4_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen offset:4 dlc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB4_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_atomic_buffer_load_i32_dlc: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB4_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT_RT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB4_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -161,26 +276,49 @@ bb2: } define 
amdgpu_kernel void @struct_nonatomic_buffer_load_i32(<4 x i32> %addr, i32 %index) { -; CHECK-LABEL: struct_nonatomic_buffer_load_i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_and_b32 v0, 0x3ff, v0 -; CHECK-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 idxen offset:4 glc -; CHECK-NEXT: s_mov_b32 s0, 0 -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: .LBB5_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_and_b32 s1, exec_lo, vcc_lo -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; CHECK-NEXT: s_or_b32 s0, s1, s0 -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; CHECK-NEXT: s_cbranch_execnz .LBB5_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_nonatomic_buffer_load_i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_and_b32 v0, 0x3ff, v0 +; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 idxen offset:4 glc +; GFX11-NEXT: s_mov_b32 s0, 0 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: .LBB5_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_and_b32 s1, exec_lo, vcc_lo +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: s_or_b32 s0, s1, s0 +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX11-NEXT: s_cbranch_execnz .LBB5_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_nonatomic_buffer_load_i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: buffer_load_b32 v1, v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s0, 0 +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: .LBB5_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_and_b32 s1, exec_lo, vcc_lo +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX12-NEXT: s_or_b32 s0, s1, s0 +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX12-NEXT: s_cbranch_execnz .LBB5_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -193,26 +331,49 @@ bb2: } define amdgpu_kernel void @struct_atomic_buffer_load_i64(<4 x i32> %addr, i32 %index) { -; CHECK-LABEL: struct_atomic_buffer_load_i64: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v2, s6 -; CHECK-NEXT: .LBB6_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b64 v[3:4], v2, s[0:3], 0 idxen offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[3:4], 
v[0:1] -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB6_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_atomic_buffer_load_i64: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v2, s6 +; GFX11-NEXT: .LBB6_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b64 v[3:4], v2, s[0:3], 0 idxen offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[3:4], v[0:1] +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB6_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_atomic_buffer_load_i64: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: v_mov_b32_e32 v1, 0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v2, s6 +; GFX12-NEXT: .LBB6_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[4:5], v[0:1] +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB6_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() %id.zext = zext i32 %id to i64 @@ -226,26 +387,48 @@ bb2: } define amdgpu_kernel void @struct_atomic_buffer_load_v2i16(<4 x i32> %addr, i32 %index) { -; CHECK-LABEL: struct_atomic_buffer_load_v2i16: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB7_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB7_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_atomic_buffer_load_v2i16: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB7_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 
s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB7_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_atomic_buffer_load_v2i16: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB7_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB7_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -259,77 +442,172 @@ bb2: } define amdgpu_kernel void @struct_atomic_buffer_load_v4i16(<4 x i32> %addr, i32 %index) { -; CHECK-SDAG-TRUE16-LABEL: struct_atomic_buffer_load_v4i16: -; CHECK-SDAG-TRUE16: ; %bb.0: ; %bb -; CHECK-SDAG-TRUE16-NEXT: s_clause 0x1 -; CHECK-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 -; CHECK-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1 -; CHECK-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc -; CHECK-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) -; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; CHECK-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2 -; CHECK-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1 -; CHECK-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 -; CHECK-SDAG-TRUE16-NEXT: s_endpgm +; GFX11-SDAG-TRUE16-LABEL: struct_atomic_buffer_load_v4i16: +; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb +; GFX11-SDAG-TRUE16-NEXT: s_clause 0x1 +; GFX11-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1 +; GFX11-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc +; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2 +; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-SDAG-TRUE16-NEXT: s_delay_alu 
instid0(SALU_CYCLE_1) +; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1 +; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX11-SDAG-TRUE16-NEXT: s_endpgm +; +; GFX11-FAKE16-LABEL: struct_atomic_buffer_load_v4i16: +; GFX11-FAKE16: ; %bb.0: ; %bb +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-FAKE16-NEXT: .LBB8_1: ; %bb1 +; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2 +; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB8_1 +; GFX11-FAKE16-NEXT: ; %bb.2: ; %bb2 +; GFX11-FAKE16-NEXT: s_endpgm +; +; GFX11-GISEL-TRUE16-LABEL: struct_atomic_buffer_load_v4i16: +; GFX11-GISEL-TRUE16: ; %bb.0: ; %bb +; GFX11-GISEL-TRUE16-NEXT: s_clause 0x1 +; GFX11-GISEL-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-GISEL-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-GISEL-TRUE16-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-GISEL-TRUE16-NEXT: .LBB8_1: ; %bb1 +; GFX11-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc +; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l +; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX11-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB8_1 +; GFX11-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX11-GISEL-TRUE16-NEXT: s_endpgm +; +; GFX11-GISEL-LABEL: struct_atomic_buffer_load_v4i16: +; GFX11-GISEL: ; %bb.0: ; %bb +; GFX11-GISEL-NEXT: s_clause 0x1 +; GFX11-GISEL-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-GISEL-NEXT: s_mov_b32 s4, 0 +; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-GISEL-NEXT: .LBB8_1: ; %bb1 +; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-GISEL-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc +; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) +; GFX11-GISEL-NEXT: v_readfirstlane_b32 s5, v2 +; GFX11-GISEL-NEXT: v_readfirstlane_b32 s6, v3 +; GFX11-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6 +; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX11-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0 +; GFX11-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-GISEL-NEXT: 
s_cbranch_execnz .LBB8_1 +; GFX11-GISEL-NEXT: ; %bb.2: ; %bb2 +; GFX11-GISEL-NEXT: s_endpgm ; -; CHECK-FAKE16-LABEL: struct_atomic_buffer_load_v4i16: -; CHECK-FAKE16: ; %bb.0: ; %bb -; CHECK-FAKE16-NEXT: s_clause 0x1 -; CHECK-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-FAKE16-NEXT: s_mov_b32 s4, 0 -; CHECK-FAKE16-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-FAKE16-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-FAKE16-NEXT: .LBB8_1: ; %bb1 -; CHECK-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc -; CHECK-FAKE16-NEXT: s_waitcnt vmcnt(0) -; CHECK-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; CHECK-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; CHECK-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2 -; CHECK-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-FAKE16-NEXT: s_cbranch_execnz .LBB8_1 -; CHECK-FAKE16-NEXT: ; %bb.2: ; %bb2 -; CHECK-FAKE16-NEXT: s_endpgm +; GFX12-SDAG-TRUE16-LABEL: struct_atomic_buffer_load_v4i16: +; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb +; GFX12-SDAG-TRUE16-NEXT: s_clause 0x1 +; GFX12-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-SDAG-TRUE16-NEXT: s_wait_xcnt 0x0 +; GFX12-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0 +; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1 +; GFX12-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0 +; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2 +; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1 +; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX12-SDAG-TRUE16-NEXT: s_endpgm ; -; CHECK-GISEL-LABEL: struct_atomic_buffer_load_v4i16: -; CHECK-GISEL: ; %bb.0: ; %bb -; CHECK-GISEL-NEXT: s_clause 0x1 -; CHECK-GISEL-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-GISEL-NEXT: s_mov_b32 s4, 0 -; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-GISEL-NEXT: .LBB8_1: ; %bb1 -; CHECK-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-GISEL-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc -; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0) -; CHECK-GISEL-NEXT: v_readfirstlane_b32 s5, v2 -; CHECK-GISEL-NEXT: v_readfirstlane_b32 s6, v3 -; CHECK-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6 -; CHECK-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) -; CHECK-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0 -; CHECK-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 
-; CHECK-GISEL-NEXT: s_cbranch_execnz .LBB8_1 -; CHECK-GISEL-NEXT: ; %bb.2: ; %bb2 -; CHECK-GISEL-NEXT: s_endpgm +; GFX12-FAKE16-LABEL: struct_atomic_buffer_load_v4i16: +; GFX12-FAKE16: ; %bb.0: ; %bb +; GFX12-FAKE16-NEXT: s_clause 0x1 +; GFX12-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-FAKE16-NEXT: s_wait_xcnt 0x0 +; GFX12-FAKE16-NEXT: s_mov_b32 s4, 0 +; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0 +; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-FAKE16-NEXT: .LBB8_1: ; %bb1 +; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0 +; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX12-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2 +; GFX12-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB8_1 +; GFX12-FAKE16-NEXT: ; %bb.2: ; %bb2 +; GFX12-FAKE16-NEXT: s_endpgm +; +; GFX12-GISEL-TRUE16-LABEL: struct_atomic_buffer_load_v4i16: +; GFX12-GISEL-TRUE16: ; %bb.0: ; %bb +; GFX12-GISEL-TRUE16-NEXT: s_clause 0x1 +; GFX12-GISEL-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-GISEL-TRUE16-NEXT: s_wait_xcnt 0x0 +; GFX12-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX12-GISEL-TRUE16-NEXT: s_wait_kmcnt 0x0 +; GFX12-GISEL-TRUE16-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-GISEL-TRUE16-NEXT: .LBB8_1: ; %bb1 +; GFX12-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-GISEL-TRUE16-NEXT: s_wait_loadcnt 0x0 +; GFX12-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l +; GFX12-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX12-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB8_1 +; GFX12-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2 +; GFX12-GISEL-TRUE16-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -344,26 +622,48 @@ bb2: } define amdgpu_kernel void @struct_atomic_buffer_load_v4i32(<4 x i32> %addr, i32 %index) { -; CHECK-LABEL: struct_atomic_buffer_load_v4i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB9_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], 0 idxen offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB9_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: 
struct_atomic_buffer_load_v4i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB9_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], 0 idxen offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB9_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_atomic_buffer_load_v4i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB9_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB9_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -377,28 +677,52 @@ bb2: } define amdgpu_kernel void @struct_atomic_buffer_load_ptr(<4 x i32> %addr, i32 %index) { -; CHECK-LABEL: struct_atomic_buffer_load_ptr: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB10_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: flat_load_b32 v2, v[2:3] -; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB10_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_atomic_buffer_load_ptr: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB10_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: flat_load_b32 v2, v[2:3] +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz 
.LBB10_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_atomic_buffer_load_ptr: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB10_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: flat_load_b32 v2, v[2:3] +; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB10_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll index 13b28d4..9abbc06 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll @@ -6,6 +6,7 @@ ; RUN: llc -mcpu=gfx1010 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefixes=GFX910,GFX10 ; RUN: llc -mcpu=gfx1100 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX11 ; RUN: llc -mcpu=gfx1200 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX12 +; RUN: llc -mcpu=gfx1250 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX12 define amdgpu_ps void @struct_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) { ; GFX67-LABEL: struct_buffer_load_i8_tfe: diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.store.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.store.ll index 9ce33c6..822016b 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.store.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.store.ll @@ -3,6 +3,8 @@ ; RUN: llc < %s -mtriple=amdgcn -mcpu=tonga | FileCheck -check-prefixes=GFX68,GFX8 %s ; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s ; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s +; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s define amdgpu_ps void @buffer_store(<4 x i32> inreg, <4 x float>, <4 x float>, <4 x float>) { ; GFX68-LABEL: buffer_store: @@ -21,6 +23,15 @@ define amdgpu_ps void @buffer_store(<4 x i32> inreg, <4 x float>, <4 x float>, < ; GFX11-NEXT: buffer_store_b128 v[4:7], v12, s[0:3], 0 idxen glc ; GFX11-NEXT: buffer_store_b128 v[8:11], v12, s[0:3], 0 idxen slc ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: v_mov_b32_e32 v12, 0 +; GFX12-NEXT: s_clause 0x2 +; GFX12-NEXT: buffer_store_b128 v[0:3], v12, s[0:3], null idxen +; GFX12-NEXT: buffer_store_b128 v[4:7], v12, s[0:3], null idxen th:TH_STORE_NT +; GFX12-NEXT: buffer_store_b128 v[8:11], v12, s[0:3], null idxen th:TH_STORE_HT +; GFX12-NEXT: s_endpgm main_body: call void 
@llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 0, i32 0, i32 0, i32 0) call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %2, <4 x i32> %0, i32 0, i32 0, i32 0, i32 1) @@ -40,6 +51,12 @@ define amdgpu_ps void @buffer_store_immoffs(<4 x i32> inreg, <4 x float>) { ; GFX11-NEXT: v_mov_b32_e32 v4, 0 ; GFX11-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], 0 idxen offset:42 ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_immoffs: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: v_mov_b32_e32 v4, 0 +; GFX12-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], null idxen offset:42 +; GFX12-NEXT: s_endpgm main_body: call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 0, i32 42, i32 0, i32 0) ret void @@ -55,6 +72,11 @@ define amdgpu_ps void @buffer_store_idx(<4 x i32> inreg, <4 x float>, i32) { ; GFX11: ; %bb.0: ; %main_body ; GFX11-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], 0 idxen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_idx: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], null idxen +; GFX12-NEXT: s_endpgm main_body: call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 %2, i32 0, i32 0, i32 0) ret void @@ -76,6 +98,12 @@ define amdgpu_ps void @buffer_store_ofs(<4 x i32> inreg, <4 x float>, i32) { ; GFX11-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, s4 ; GFX11-NEXT: buffer_store_b128 v[0:3], v[4:5], s[0:3], 0 idxen offen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_ofs: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, 0 +; GFX12-NEXT: buffer_store_b128 v[0:3], v[4:5], s[0:3], null idxen offen +; GFX12-NEXT: s_endpgm main_body: call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 0, i32 %2, i32 0, i32 0) ret void @@ -91,6 +119,11 @@ define amdgpu_ps void @buffer_store_both(<4 x i32> inreg, <4 x float>, i32, i32) ; GFX11: ; %bb.0: ; %main_body ; GFX11-NEXT: buffer_store_b128 v[0:3], v[4:5], s[0:3], 0 idxen offen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_both: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: buffer_store_b128 v[0:3], v[4:5], s[0:3], null idxen offen +; GFX12-NEXT: s_endpgm main_body: call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 %2, i32 %3, i32 0, i32 0) ret void @@ -108,6 +141,12 @@ define amdgpu_ps void @buffer_store_both_reversed(<4 x i32> inreg, <4 x float>, ; GFX11-NEXT: v_mov_b32_e32 v6, v4 ; GFX11-NEXT: buffer_store_b128 v[0:3], v[5:6], s[0:3], 0 idxen offen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_both_reversed: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: v_dual_mov_b32 v6, v5 :: v_dual_mov_b32 v7, v4 +; GFX12-NEXT: buffer_store_b128 v[0:3], v[6:7], s[0:3], null idxen offen +; GFX12-NEXT: s_endpgm main_body: call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 %3, i32 %2, i32 0, i32 0) ret void @@ -139,6 +178,15 @@ define amdgpu_ps void @buffer_store_wait(<4 x i32> inreg, <4 x float>, i32, i32, ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_store_b128 v[0:3], v6, s[0:3], 0 idxen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_wait: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], null idxen +; GFX12-NEXT: buffer_load_b128 v[0:3], v5, s[0:3], null idxen +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: buffer_store_b128 v[0:3], v6, s[0:3], null idxen +; GFX12-NEXT: s_endpgm 
main_body: call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 %2, i32 0, i32 0, i32 0) %data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %0, i32 %3, i32 0, i32 0, i32 0) @@ -156,6 +204,11 @@ define amdgpu_ps void @buffer_store_x1(<4 x i32> inreg %rsrc, float %data, i32 % ; GFX11: ; %bb.0: ; %main_body ; GFX11-NEXT: buffer_store_b32 v0, v1, s[0:3], 0 idxen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_x1: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: buffer_store_b32 v0, v1, s[0:3], null idxen +; GFX12-NEXT: s_endpgm main_body: call void @llvm.amdgcn.struct.buffer.store.f32(float %data, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0) ret void @@ -171,6 +224,11 @@ define amdgpu_ps void @buffer_store_x2(<4 x i32> inreg %rsrc, <2 x float> %data, ; GFX11: ; %bb.0: ; %main_body ; GFX11-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], 0 idxen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_x2: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], null idxen +; GFX12-NEXT: s_endpgm main_body: call void @llvm.amdgcn.struct.buffer.store.v2f32(<2 x float> %data, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0) ret void @@ -193,6 +251,15 @@ define amdgpu_ps void @buffer_store_int(<4 x i32> inreg, <4 x i32>, <2 x i32>, i ; GFX11-NEXT: buffer_store_b64 v[4:5], v7, s[0:3], 0 idxen glc ; GFX11-NEXT: buffer_store_b32 v6, v7, s[0:3], 0 idxen slc ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: buffer_store_int: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: v_mov_b32_e32 v7, 0 +; GFX12-NEXT: s_clause 0x2 +; GFX12-NEXT: buffer_store_b128 v[0:3], v7, s[0:3], null idxen +; GFX12-NEXT: buffer_store_b64 v[4:5], v7, s[0:3], null idxen th:TH_STORE_NT +; GFX12-NEXT: buffer_store_b32 v6, v7, s[0:3], null idxen th:TH_STORE_HT +; GFX12-NEXT: s_endpgm main_body: call void @llvm.amdgcn.struct.buffer.store.v4i32(<4 x i32> %1, <4 x i32> %0, i32 0, i32 0, i32 0, i32 0) call void @llvm.amdgcn.struct.buffer.store.v2i32(<2 x i32> %2, <4 x i32> %0, i32 0, i32 0, i32 0, i32 1) @@ -212,6 +279,12 @@ define amdgpu_ps void @struct_buffer_store_byte(<4 x i32> inreg %rsrc, float %v1 ; GFX11-NEXT: v_cvt_u32_f32_e32 v0, v0 ; GFX11-NEXT: buffer_store_b8 v0, v1, s[0:3], 0 idxen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_buffer_store_byte: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GFX12-NEXT: buffer_store_b8 v0, v1, s[0:3], null idxen +; GFX12-NEXT: s_endpgm main_body: %v2 = fptoui float %v1 to i32 %v3 = trunc i32 %v2 to i8 @@ -237,6 +310,18 @@ define amdgpu_ps void @struct_buffer_store_f16(<4 x i32> inreg %rsrc, float %v1, ; GFX11-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0 ; GFX11-FAKE16-NEXT: buffer_store_b16 v0, v1, s[0:3], 0 idxen ; GFX11-FAKE16-NEXT: s_endpgm +; +; GFX12-TRUE16-LABEL: struct_buffer_store_f16: +; GFX12-TRUE16: ; %bb.0: +; GFX12-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0 +; GFX12-TRUE16-NEXT: buffer_store_b16 v0, v1, s[0:3], null idxen +; GFX12-TRUE16-NEXT: s_endpgm +; +; GFX12-FAKE16-LABEL: struct_buffer_store_f16: +; GFX12-FAKE16: ; %bb.0: +; GFX12-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GFX12-FAKE16-NEXT: buffer_store_b16 v0, v1, s[0:3], null idxen +; GFX12-FAKE16-NEXT: s_endpgm %v2 = fptrunc float %v1 to half call void @llvm.amdgcn.struct.buffer.store.f16(half %v2, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0) ret void @@ -261,6 +346,11 @@ define amdgpu_ps void @struct_buffer_store_v2f16(<4 x i32> inreg %rsrc, <2 x hal ; GFX11: ; %bb.0: ; GFX11-NEXT: buffer_store_b32 v0, v1, s[0:3], 0 
idxen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_buffer_store_v2f16: +; GFX12: ; %bb.0: +; GFX12-NEXT: buffer_store_b32 v0, v1, s[0:3], null idxen +; GFX12-NEXT: s_endpgm call void @llvm.amdgcn.struct.buffer.store.v2f16(<2 x half> %v1, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0) ret void } @@ -288,6 +378,11 @@ define amdgpu_ps void @struct_buffer_store_v4f16(<4 x i32> inreg %rsrc, <4 x hal ; GFX11: ; %bb.0: ; GFX11-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], 0 idxen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_buffer_store_v4f16: +; GFX12: ; %bb.0: +; GFX12-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], null idxen +; GFX12-NEXT: s_endpgm call void @llvm.amdgcn.struct.buffer.store.v4f16(<4 x half> %v1, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0) ret void } @@ -304,6 +399,12 @@ define amdgpu_ps void @struct_buffer_store_i16(<4 x i32> inreg %rsrc, float %v1, ; GFX11-NEXT: v_cvt_u32_f32_e32 v0, v0 ; GFX11-NEXT: buffer_store_b16 v0, v1, s[0:3], 0 idxen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_buffer_store_i16: +; GFX12: ; %bb.0: ; %main_body +; GFX12-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GFX12-NEXT: buffer_store_b16 v0, v1, s[0:3], null idxen +; GFX12-NEXT: s_endpgm main_body: %v2 = fptoui float %v1 to i32 %v3 = trunc i32 %v2 to i16 @@ -329,6 +430,11 @@ define amdgpu_ps void @struct_buffer_store_vif16(<4 x i32> inreg %rsrc, <2 x i16 ; GFX11: ; %bb.0: ; GFX11-NEXT: buffer_store_b32 v0, v1, s[0:3], 0 idxen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_buffer_store_vif16: +; GFX12: ; %bb.0: +; GFX12-NEXT: buffer_store_b32 v0, v1, s[0:3], null idxen +; GFX12-NEXT: s_endpgm call void @llvm.amdgcn.struct.buffer.store.v2i16(<2 x i16> %v1, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0) ret void } @@ -354,6 +460,11 @@ define amdgpu_ps void @struct_buffer_store_v4i16(<4 x i32> inreg %rsrc, <4 x i16 ; GFX11: ; %bb.0: ; GFX11-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], 0 idxen ; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_buffer_store_v4i16: +; GFX12: ; %bb.0: +; GFX12-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], null idxen +; GFX12-NEXT: s_endpgm call void @llvm.amdgcn.struct.buffer.store.v4i16(<4 x i16> %v1, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0) ret void } diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll index 8f33dd6..23db247 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll @@ -1,30 +1,58 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 -; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-SDAG-TRUE16 -; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-FAKE16 -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL -; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-SDAG-TRUE16 +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16 +; RUN: llc -global-isel=1 
-mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL-TRUE16 +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16 +; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL +; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-SDAG-TRUE16 +; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16 +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-GISEL-TRUE16 +; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16 define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32(ptr addrspace(8) %ptr, i32 %index) { -; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB0_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB0_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_ptr_atomic_buffer_load_i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB0_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB0_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_ptr_atomic_buffer_load_i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB0_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz 
.LBB0_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -37,23 +65,43 @@ bb2: } define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_const_idx(ptr addrspace(8) %ptr) { -; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_const_idx: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: .LBB1_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB1_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_ptr_atomic_buffer_load_i32_const_idx: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: .LBB1_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB1_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_ptr_atomic_buffer_load_i32_const_idx: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: v_mov_b32_e32 v1, 15 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: .LBB1_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB1_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -66,26 +114,48 @@ bb2: } define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_off(ptr addrspace(8) %ptr, i32 %index) { -; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_off: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB2_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB2_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_ptr_atomic_buffer_load_i32_off: +; GFX11: ; 
%bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB2_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB2_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_ptr_atomic_buffer_load_i32_off: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB2_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB2_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -98,26 +168,49 @@ bb2: } define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_soff(ptr addrspace(8) %ptr, i32 %index) { -; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_soff: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB3_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 4 idxen offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB3_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_ptr_atomic_buffer_load_i32_soff: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB3_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 4 idxen offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB3_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_ptr_atomic_buffer_load_i32_soff: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_mov_b32 s5, 4 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB3_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], s5 idxen offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB3_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -129,26 +222,48 @@ bb2: ret void } define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_dlc(ptr addrspace(8) %ptr, i32 %index) { -; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_dlc: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB4_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen offset:4 dlc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB4_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_ptr_atomic_buffer_load_i32_dlc: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB4_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen offset:4 dlc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB4_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_ptr_atomic_buffer_load_i32_dlc: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB4_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT_RT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB4_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -161,26 +276,49 @@ bb2: } define amdgpu_kernel void 
@struct_ptr_nonatomic_buffer_load_i32(ptr addrspace(8) %ptr, i32 %index) { -; CHECK-LABEL: struct_ptr_nonatomic_buffer_load_i32: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_and_b32 v0, 0x3ff, v0 -; CHECK-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 idxen offset:4 glc -; CHECK-NEXT: s_mov_b32 s0, 0 -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 -; CHECK-NEXT: .LBB5_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: s_and_b32 s1, exec_lo, vcc_lo -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) -; CHECK-NEXT: s_or_b32 s0, s1, s0 -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 -; CHECK-NEXT: s_cbranch_execnz .LBB5_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_ptr_nonatomic_buffer_load_i32: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_and_b32 v0, 0x3ff, v0 +; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 idxen offset:4 glc +; GFX11-NEXT: s_mov_b32 s0, 0 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX11-NEXT: .LBB5_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_and_b32 s1, exec_lo, vcc_lo +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: s_or_b32 s0, s1, s0 +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX11-NEXT: s_cbranch_execnz .LBB5_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_ptr_nonatomic_buffer_load_i32: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: buffer_load_b32 v1, v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s0, 0 +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0 +; GFX12-NEXT: .LBB5_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: s_and_b32 s1, exec_lo, vcc_lo +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX12-NEXT: s_or_b32 s0, s1, s0 +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0 +; GFX12-NEXT: s_cbranch_execnz .LBB5_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -193,26 +331,49 @@ bb2: } define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i64(ptr addrspace(8) %ptr, i32 %index) { -; CHECK-LABEL: struct_ptr_atomic_buffer_load_i64: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v2, s6 -; CHECK-NEXT: .LBB6_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b64 v[3:4], v2, s[0:3], 0 idxen offset:4 glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u64_e32 
vcc_lo, v[3:4], v[0:1] -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB6_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_ptr_atomic_buffer_load_i64: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v2, s6 +; GFX11-NEXT: .LBB6_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b64 v[3:4], v2, s[0:3], 0 idxen offset:4 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[3:4], v[0:1] +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB6_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_ptr_atomic_buffer_load_i64: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: v_mov_b32_e32 v1, 0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v2, s6 +; GFX12-NEXT: .LBB6_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null idxen offset:4 th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[4:5], v[0:1] +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB6_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() %id.zext = zext i32 %id to i64 @@ -226,26 +387,48 @@ bb2: } define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v2i16(ptr addrspace(8) %ptr, i32 %index) { -; CHECK-LABEL: struct_ptr_atomic_buffer_load_v2i16: -; CHECK: ; %bb.0: ; %bb -; CHECK-NEXT: s_clause 0x1 -; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-NEXT: s_mov_b32 s4, 0 -; CHECK-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-NEXT: .LBB7_1: ; %bb1 -; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc -; CHECK-NEXT: s_waitcnt vmcnt(0) -; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-NEXT: s_cbranch_execnz .LBB7_1 -; CHECK-NEXT: ; %bb.2: ; %bb2 -; CHECK-NEXT: s_endpgm +; GFX11-LABEL: struct_ptr_atomic_buffer_load_v2i16: +; GFX11: ; %bb.0: ; %bb +; GFX11-NEXT: s_clause 0x1 +; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-NEXT: s_mov_b32 s4, 0 +; GFX11-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-NEXT: .LBB7_1: ; %bb1 +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: 
v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX11-NEXT: s_cbranch_execnz .LBB7_1 +; GFX11-NEXT: ; %bb.2: ; %bb2 +; GFX11-NEXT: s_endpgm +; +; GFX12-LABEL: struct_ptr_atomic_buffer_load_v2i16: +; GFX12: ; %bb.0: ; %bb +; GFX12-NEXT: s_clause 0x1 +; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX12-NEXT: s_wait_xcnt 0x0 +; GFX12-NEXT: s_mov_b32 s4, 0 +; GFX12-NEXT: s_wait_kmcnt 0x0 +; GFX12-NEXT: v_mov_b32_e32 v1, s6 +; GFX12-NEXT: .LBB7_1: ; %bb1 +; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT +; GFX12-NEXT: s_wait_loadcnt 0x0 +; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 +; GFX12-NEXT: s_cbranch_execnz .LBB7_1 +; GFX12-NEXT: ; %bb.2: ; %bb2 +; GFX12-NEXT: s_endpgm bb: %id = tail call i32 @llvm.amdgcn.workitem.id.x() br label %bb1 @@ -259,77 +442,172 @@ bb2: } define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v4i16(ptr addrspace(8) %ptr, i32 %index) { -; CHECK-SDAG-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16: -; CHECK-SDAG-TRUE16: ; %bb.0: ; %bb -; CHECK-SDAG-TRUE16-NEXT: s_clause 0x1 -; CHECK-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34 -; CHECK-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 -; CHECK-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 -; CHECK-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0) -; CHECK-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6 -; CHECK-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1 -; CHECK-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 -; CHECK-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc -; CHECK-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) -; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 -; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; CHECK-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2 -; CHECK-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 -; CHECK-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4 -; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) -; CHECK-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4 -; CHECK-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1 -; CHECK-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2 -; CHECK-SDAG-TRUE16-NEXT: s_endpgm +; GFX11-SDAG-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16: +; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb +; GFX11-SDAG-TRUE16-NEXT: s_clause 0x1 +; GFX11-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34 +; GFX11-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0 +; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6 +; GFX11-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1 +; GFX11-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc +; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2 +; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2 +; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0 +; GFX11-SDAG-TRUE16-NEXT: 
s_or_b32 s4, vcc_lo, s4
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
+; GFX11-FAKE16: ; %bb.0: ; %bb
+; GFX11-FAKE16-NEXT: s_clause 0x1
+; GFX11-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-FAKE16-NEXT: .LBB8_1: ; %bb1
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-FAKE16-NEXT: s_endpgm
+;
+; GFX11-GISEL-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
+; GFX11-GISEL-TRUE16: ; %bb.0: ; %bb
+; GFX11-GISEL-TRUE16-NEXT: s_clause 0x1
+; GFX11-GISEL-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-GISEL-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-GISEL-TRUE16-NEXT: .LBB8_1: ; %bb1
+; GFX11-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-GISEL-TRUE16-NEXT: s_endpgm
+;
+; GFX11-GISEL-LABEL: struct_ptr_atomic_buffer_load_v4i16:
+; GFX11-GISEL: ; %bb.0: ; %bb
+; GFX11-GISEL-NEXT: s_clause 0x1
+; GFX11-GISEL-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-GISEL-NEXT: s_mov_b32 s4, 0
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-GISEL-NEXT: .LBB8_1: ; %bb1
+; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: v_readfirstlane_b32 s5, v2
+; GFX11-GISEL-NEXT: v_readfirstlane_b32 s6, v3
+; GFX11-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0
+; GFX11-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-GISEL-NEXT: ; %bb.2: ; %bb2
+; GFX11-GISEL-NEXT: s_endpgm
 ;
-; CHECK-FAKE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
-; CHECK-FAKE16: ; %bb.0: ; %bb
-; CHECK-FAKE16-NEXT: s_clause 0x1
-; CHECK-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-FAKE16-NEXT: s_mov_b32 s4, 0
-; CHECK-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-FAKE16-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-FAKE16-NEXT: .LBB8_1: ; %bb1
-; CHECK-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; CHECK-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; CHECK-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
-; CHECK-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
-; CHECK-FAKE16-NEXT: ; %bb.2: ; %bb2
-; CHECK-FAKE16-NEXT: s_endpgm
+; GFX12-SDAG-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
+; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX12-SDAG-TRUE16-NEXT: s_clause 0x1
+; GFX12-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1
+; GFX12-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-SDAG-TRUE16-NEXT: s_endpgm
 ;
-; CHECK-GISEL-LABEL: struct_ptr_atomic_buffer_load_v4i16:
-; CHECK-GISEL: ; %bb.0: ; %bb
-; CHECK-GISEL-NEXT: s_clause 0x1
-; CHECK-GISEL-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-GISEL-NEXT: s_mov_b32 s4, 0
-; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-GISEL-NEXT: .LBB8_1: ; %bb1
-; CHECK-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-GISEL-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0)
-; CHECK-GISEL-NEXT: v_readfirstlane_b32 s5, v2
-; CHECK-GISEL-NEXT: v_readfirstlane_b32 s6, v3
-; CHECK-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; CHECK-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; CHECK-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0
-; CHECK-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-GISEL-NEXT: s_cbranch_execnz .LBB8_1
-; CHECK-GISEL-NEXT: ; %bb.2: ; %bb2
-; CHECK-GISEL-NEXT: s_endpgm
+; GFX12-FAKE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
+; GFX12-FAKE16: ; %bb.0: ; %bb
+; GFX12-FAKE16-NEXT: s_clause 0x1
+; GFX12-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX12-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-FAKE16-NEXT: .LBB8_1: ; %bb1
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX12-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-FAKE16-NEXT: s_endpgm
+;
+; GFX12-GISEL-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
+; GFX12-GISEL-TRUE16: ; %bb.0: ; %bb
+; GFX12-GISEL-TRUE16-NEXT: s_clause 0x1
+; GFX12-GISEL-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-GISEL-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-GISEL-TRUE16-NEXT: .LBB8_1: ; %bb1
+; GFX12-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-GISEL-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX12-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX12-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-GISEL-TRUE16-NEXT: s_endpgm
 bb:
   %id = tail call i32 @llvm.amdgcn.workitem.id.x()
   br label %bb1
@@ -344,26 +622,48 @@ bb2:
 }
 
 define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v4i32(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-LABEL: struct_ptr_atomic_buffer_load_v4i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB9_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB9_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_atomic_buffer_load_v4i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB9_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_atomic_buffer_load_v4i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB9_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB9_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
 bb:
   %id = tail call i32 @llvm.amdgcn.workitem.id.x()
   br label %bb1
@@ -377,28 +677,52 @@ bb2:
 }
 
 define amdgpu_kernel void @struct_ptr_atomic_buffer_load_ptr(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-LABEL: struct_ptr_atomic_buffer_load_ptr:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB10_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_load_b32 v2, v[2:3]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB10_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_atomic_buffer_load_ptr:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB10_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: flat_load_b32 v2, v[2:3]
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_atomic_buffer_load_ptr:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB10_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: flat_load_b32 v2, v[2:3]
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB10_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
 bb:
   %id = tail call i32 @llvm.amdgcn.workitem.id.x()
   br label %bb1
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_nortn.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_nortn.ll
index 746b879..4366472 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_nortn.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_nortn.ll
@@ -3,6 +3,7 @@
 ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
 ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
 ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s
 
 define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) #0 {
 ; GFX908-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
@@ -39,6 +40,14 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_v
 ; GFX1200-NEXT: s_wait_kmcnt 0x0
 ; GFX1200-NEXT: buffer_atomic_add_f32 v0, v[1:2], s[0:3], s16 idxen offen
 ; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[2:3], s[0:3], s16 idxen offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
   ret void
 }
@@ -75,6 +84,13 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voff
 ; GFX1200-NEXT: s_wait_kmcnt 0x0
 ; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 idxen
 ; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 idxen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
   ret void
 }
@@ -114,6 +130,14 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_v
 ; GFX1200-NEXT: s_wait_kmcnt 0x0
 ; GFX1200-NEXT: buffer_atomic_add_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
 ; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
   ret void
 }
@@ -153,6 +177,14 @@ define void @struct_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr
 ; GFX1200-NEXT: s_wait_kmcnt 0x0
 ; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, v[1:2], s[0:3], s16 idxen offen
 ; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v[2:3], s[0:3], s16 idxen offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call <2 x half> @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
   ret void
 }
@@ -291,6 +323,42 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__vgpr_rsrc__vgpr_v
 ; GFX1200-NEXT: ; %bb.2:
 ; GFX1200-NEXT: s_mov_b32 exec_lo, s2
 ; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__vgpr_rsrc__vgpr_voffset__vgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v9, v6 :: v_dual_mov_b32 v8, v5
+; GFX1250-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_b32 s0, s0, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[8:9], s[4:7], s3 idxen offen
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr7
+; GFX1250-NEXT: ; implicit-def: $vgpr0
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB4_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
   ret void
 }
@@ -429,6 +497,42 @@ define void @struct_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__vgpr_rsrc__vgpr
 ; GFX1200-NEXT: ; %bb.2:
 ; GFX1200-NEXT: s_mov_b32 exec_lo, s2
 ; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__vgpr_rsrc__vgpr_voffset__vgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v9, v6 :: v_dual_mov_b32 v8, v5
+; GFX1250-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_b32 s0, s0, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v[8:9], s[4:7], s3 idxen offen
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr7
+; GFX1250-NEXT: ; implicit-def: $vgpr0
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB5_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call <2 x half> @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
   ret void
 }
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_rtn.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_rtn.ll
index 71c63bf..0191a85 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_rtn.ll
@@ -2,6 +2,7 @@
 ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
 ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
 ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s
 
 define float @struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) #0 {
 ; GFX90A-LABEL: struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
@@ -32,6 +33,15 @@ define float @struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_vo
 ; GFX1200-NEXT: buffer_atomic_add_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
 ; GFX1200-NEXT: s_wait_loadcnt 0x0
 ; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
   ret float %ret
 }
@@ -62,6 +72,14 @@ define float @struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffs
 ; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
 ; GFX1200-NEXT: s_wait_loadcnt 0x0
 ; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
   ret float %ret
 }
@@ -95,6 +113,15 @@ define float @struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_vo
 ; GFX1200-NEXT: buffer_atomic_add_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
 ; GFX1200-NEXT: s_wait_loadcnt 0x0
 ; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
   ret float %ret
 }
@@ -128,6 +155,15 @@ define <2 x half> @struct_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__
 ; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
 ; GFX1200-NEXT: s_wait_loadcnt 0x0
 ; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call <2 x half> @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
   ret <2 x half> %ret
 }
@@ -237,6 +273,43 @@ define float @struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__vgpr_rsrc__vgpr_vo
 ; GFX1200-NEXT: s_mov_b32 exec_lo, s2
 ; GFX1200-NEXT: s_wait_loadcnt 0x0
 ; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset__vgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v9, v6 :: v_dual_mov_b32 v8, v5
+; GFX1250-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_b32 s0, s0, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[8:9], s[4:7], s3 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr7
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB4_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
   ret float %ret
 }
@@ -346,6 +419,43 @@ define <2 x half> @struct_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__vgpr_rsrc__
 ; GFX1200-NEXT: s_mov_b32 exec_lo, s2
 ; GFX1200-NEXT: s_wait_loadcnt 0x0
 ; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset__vgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v9, v6 :: v_dual_mov_b32 v8, v5
+; GFX1250-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_b32 s0, s0, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v[8:9], s[4:7], s3 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr7
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB5_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call <2 x half> @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
   ret <2 x half> %ret
 }
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32.ll
index e3889ab..d551d91 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32.ll
@@ -4,7 +4,8 @@
 ; Not supported in gfx8 or gfx9
 ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
 ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
-; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s
 
 define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
 ; GFX6-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
@@ -35,16 +36,25 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_vo
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
 ;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
   ret float %ret
 }
@@ -78,16 +88,25 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_vo
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
 ;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_add_nc_u32 v5, 0x100, v2
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[4:5], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %voffset.add = add i32 %voffset, 256
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
   ret float %ret
@@ -122,16 +141,24 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__0_voffs
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
 ;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
   ret float %ret
 }
@@ -165,16 +192,25 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_vo
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
 ;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
   ret float %ret
 }
@@ -206,15 +242,23 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_v
 ; GFX11-NEXT: buffer_atomic_max_f32 v0, v[1:2], s[0:3], s16 idxen offen
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
 ;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[2:3], s[0:3], s16 idxen offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
   ret void
 }
@@ -246,15 +290,23 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_v
 ; GFX11-NEXT: buffer_atomic_max_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
 ;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_add_nc_u32 v5, 0x100, v2
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[4:5], s[0:3], s16 idxen offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %voffset.add = add i32 %voffset, 256
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
   ret void
@@ -288,15 +340,22 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voff
 ; GFX11-NEXT: buffer_atomic_max_f32 v0, v1, s[0:3], s16 idxen
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
 ;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
   ret void
 }
@@ -328,15 +387,23 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_v
 ; GFX11-NEXT: buffer_atomic_max_f32 v0, v[1:2], s[0:3], s16 idxen offen slc
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
 ;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
   ret void
 }
@@ -442,36 +509,68 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__vgpr_rsrc__vgpr_vo
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
 ;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_mov_b32 s2, exec_lo
-; GFX12-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: v_readfirstlane_b32 s4, v1
-; GFX12-NEXT: v_readfirstlane_b32 s5, v2
-; GFX12-NEXT: v_readfirstlane_b32 s6, v3
-; GFX12-NEXT: v_readfirstlane_b32 s7, v4
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[3:4]
-; GFX12-NEXT: s_and_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s1, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[5:6], s[4:7], s0 idxen offen offset:256 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
-; GFX12-NEXT: ; implicit-def: $vgpr5_vgpr6
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB8_1
-; GFX12-NEXT: ; %bb.2:
-; GFX12-NEXT: s_mov_b32 exec_lo, s2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: s_mov_b32 s2, exec_lo
+; GFX1200-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1200-NEXT: v_readfirstlane_b32 s4, v1
+; GFX1200-NEXT: v_readfirstlane_b32 s5, v2
+; GFX1200-NEXT: v_readfirstlane_b32 s6, v3
+; GFX1200-NEXT: v_readfirstlane_b32 s7, v4
+; GFX1200-NEXT: s_wait_alu 0xf1ff
+; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1200-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
+; GFX1200-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[3:4]
+; GFX1200-NEXT: s_and_b32 s1, vcc_lo, s1
+; GFX1200-NEXT: s_wait_alu 0xfffe
+; GFX1200-NEXT: s_and_saveexec_b32 s1, s1
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[5:6], s[4:7], s0 idxen offen offset:256 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
+; GFX1200-NEXT: ; implicit-def: $vgpr5_vgpr6
+; GFX1200-NEXT: s_xor_b32 exec_lo, exec_lo, s1
+; GFX1200-NEXT: s_cbranch_execnz .LBB8_1
+; GFX1200-NEXT: ; %bb.2:
+; GFX1200-NEXT: s_mov_b32 exec_lo, s2
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v8, v5 :: v_dual_mov_b32 v5, v4
+; GFX1250-NEXT: v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v3, v2
+; GFX1250-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_add_nc_u32 v9, 0x100, v6
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[4:5]
+; GFX1250-NEXT: s_and_b32 s1, vcc_lo, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s1, s1
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[8:9], s[4:7], s0 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s1
+; GFX1250-NEXT: s_cbranch_execnz .LBB8_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %voffset.add = add i32 %voffset, 256
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
   ret float %ret
@@ -595,41 +694,78 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_vo
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
 ;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__vgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_mov_b32 s2, exec_lo
-; GFX12-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: v_readfirstlane_b32 s4, v1
-; GFX12-NEXT: v_readfirstlane_b32 s5, v2
-; GFX12-NEXT: v_readfirstlane_b32 s6, v3
-; GFX12-NEXT: v_readfirstlane_b32 s7, v4
-; GFX12-NEXT: v_readfirstlane_b32 s3, v7
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4]
-; GFX12-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_b32 s0, s0, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[5:6], s[4:7], s3 idxen offen offset:256 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
-; GFX12-NEXT: ; implicit-def: $vgpr7
-; GFX12-NEXT: ; implicit-def: $vgpr5_vgpr6
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB9_1
-; GFX12-NEXT: ; %bb.2:
-; GFX12-NEXT: s_mov_b32 exec_lo, s2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__vgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: s_mov_b32 s2, exec_lo
+; GFX1200-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1
+; GFX1200-NEXT: v_readfirstlane_b32 s4, v1
+; GFX1200-NEXT: v_readfirstlane_b32 s5, v2
+; GFX1200-NEXT: v_readfirstlane_b32 s6, v3
+; GFX1200-NEXT: v_readfirstlane_b32 s7, v4
+; GFX1200-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1200-NEXT: s_wait_alu 0xf1ff
+; GFX1200-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
+; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1200-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4]
+; GFX1200-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1200-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1200-NEXT: s_wait_alu 0xfffe
+; GFX1200-NEXT: s_and_b32 s0, s0, s1
+; GFX1200-NEXT: s_wait_alu 0xfffe
+; GFX1200-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[5:6], s[4:7], s3 idxen offen offset:256 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
+; GFX1200-NEXT: ; implicit-def: $vgpr7
+; GFX1200-NEXT: ; implicit-def: $vgpr5_vgpr6
+; GFX1200-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1200-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1200-NEXT: ; %bb.2:
+; GFX1200-NEXT: s_mov_b32 exec_lo, s2
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__vgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v8, v5 :: v_dual_mov_b32 v5, v4
+; GFX1250-NEXT: v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v3, v2
+; GFX1250-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_add_nc_u32 v9, 0x100, v6
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_b32 s0, s0, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[8:9], s[4:7], s3 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr7
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %voffset.add = add i32 %voffset, 256
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
   ret float %ret
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32.ll
index f001bf9..0096289 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32.ll
@@ -4,7 +4,8 @@
 ; Not supported in gfx8 or gfx9
 ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
 ; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
-; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s
 
 define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
 ; GFX6-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
@@ -35,16 +36,25 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_v
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
 ;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
   ret float %ret
 }
@@ -78,16 +88,25 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_v
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
 ;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_add_nc_u32 v5, 0x100, v2
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[4:5], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %voffset.add = add i32 %voffset, 256
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
   ret float %ret
@@ -122,16 +141,24 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__0_voff
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
 ;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
   ret float %ret
 }
@@ -165,16 +192,25 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_v
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
 ;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
   ret float %ret
 }
@@ -206,15 +242,23 @@ define void @struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_
 ; GFX11-NEXT: buffer_atomic_min_f32 v0, v[1:2], s[0:3], s16 idxen offen
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
 ;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[2:3], s[0:3], s16 idxen offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
   ret void
 }
@@ -246,15 +290,23 @@ define void @struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_
 ; GFX11-NEXT: buffer_atomic_min_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
 ;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_add_nc_u32 v5, 0x100, v2
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[4:5], s[0:3], s16 idxen offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %voffset.add = add i32 %voffset, 256
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
   ret void
@@ -288,15 +340,22 @@ define void @struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__0_vof
 ; GFX11-NEXT: buffer_atomic_min_f32 v0, v1, s[0:3], s16 idxen
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
 ;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
   ret void
 }
@@ -328,15 +387,23 @@ define void @struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_
 ; GFX11-NEXT: buffer_atomic_min_f32 v0, v[1:2], s[0:3], s16 idxen offen slc
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
 ;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
   %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
   ret void
 }
@@ -442,36 +509,68 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__vgpr_rsrc__vgpr_v
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: s_setpc_b64 s[30:31]
 ;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_mov_b32 s2, exec_lo
-; GFX12-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: v_readfirstlane_b32 s4, v1
-; GFX12-NEXT: v_readfirstlane_b32 s5, v2
-; GFX12-NEXT: v_readfirstlane_b32 s6, v3
-; GFX12-NEXT: v_readfirstlane_b32 s7, v4
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[3:4]
-; GFX12-NEXT: s_and_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s1, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[5:6], s[4:7], s0 idxen offen offset:256 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
-; GFX12-NEXT: ; implicit-def: $vgpr5_vgpr6
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB8_1
-; GFX12-NEXT: ; %bb.2:
-; GFX12-NEXT: s_mov_b32 exec_lo, s2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: s_mov_b32 s2, exec_lo
+; GFX1200-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1200-NEXT: v_readfirstlane_b32 s4, v1
+; GFX1200-NEXT: v_readfirstlane_b32 s5, v2
+; GFX1200-NEXT: v_readfirstlane_b32 s6, v3
+; GFX1200-NEXT: v_readfirstlane_b32 s7, v4
+; GFX1200-NEXT: s_wait_alu 0xf1ff
+; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1200-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
+; GFX1200-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[3:4]
+; GFX1200-NEXT: s_and_b32 s1, vcc_lo, s1
+;
GFX1200-NEXT: s_wait_alu 0xfffe +; GFX1200-NEXT: s_and_saveexec_b32 s1, s1 +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[5:6], s[4:7], s0 idxen offen offset:256 th:TH_ATOMIC_RETURN +; GFX1200-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4 +; GFX1200-NEXT: ; implicit-def: $vgpr5_vgpr6 +; GFX1200-NEXT: s_xor_b32 exec_lo, exec_lo, s1 +; GFX1200-NEXT: s_cbranch_execnz .LBB8_1 +; GFX1200-NEXT: ; %bb.2: +; GFX1200-NEXT: s_mov_b32 exec_lo, s2 +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v8, v5 :: v_dual_mov_b32 v5, v4 +; GFX1250-NEXT: v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v3, v2 +; GFX1250-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_add_nc_u32 v9, 0x100, v6 +; GFX1250-NEXT: s_mov_b32 s2, exec_lo +; GFX1250-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_readfirstlane_b32 s4, v2 +; GFX1250-NEXT: v_readfirstlane_b32 s5, v3 +; GFX1250-NEXT: v_readfirstlane_b32 s6, v4 +; GFX1250-NEXT: v_readfirstlane_b32 s7, v5 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3] +; GFX1250-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[4:5] +; GFX1250-NEXT: s_and_b32 s1, vcc_lo, s1 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_and_saveexec_b32 s1, s1 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[8:9], s[4:7], s0 idxen offen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5 +; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s1 +; GFX1250-NEXT: s_cbranch_execnz .LBB8_1 +; GFX1250-NEXT: ; %bb.2: +; GFX1250-NEXT: s_mov_b32 exec_lo, s2 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %voffset.add = add i32 %voffset, 256 %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0) ret float %ret @@ -595,41 +694,78 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_v ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: s_setpc_b64 s[30:31] ; -; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__vgpr_soffset: -; GFX12: ; %bb.0: -; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0 -; GFX12-NEXT: s_wait_expcnt 0x0 -; GFX12-NEXT: s_wait_samplecnt 0x0 -; GFX12-NEXT: s_wait_bvhcnt 0x0 -; GFX12-NEXT: s_wait_kmcnt 0x0 -; GFX12-NEXT: s_mov_b32 s2, exec_lo -; GFX12-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1 -; GFX12-NEXT: v_readfirstlane_b32 s4, v1 -; GFX12-NEXT: v_readfirstlane_b32 s5, v2 -; GFX12-NEXT: v_readfirstlane_b32 s6, v3 -; GFX12-NEXT: v_readfirstlane_b32 s7, v4 -; GFX12-NEXT: v_readfirstlane_b32 s3, v7 -; GFX12-NEXT: s_wait_alu 0xf1ff -; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2] -; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4] -; GFX12-NEXT: v_cmp_eq_u32_e64 s1, s3, v7 -; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0 -; GFX12-NEXT: s_wait_alu 0xfffe -; GFX12-NEXT: s_and_b32 s0, s0, s1 -; GFX12-NEXT: 
s_wait_alu 0xfffe -; GFX12-NEXT: s_and_saveexec_b32 s0, s0 -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[5:6], s[4:7], s3 idxen offen offset:256 th:TH_ATOMIC_RETURN -; GFX12-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4 -; GFX12-NEXT: ; implicit-def: $vgpr7 -; GFX12-NEXT: ; implicit-def: $vgpr5_vgpr6 -; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0 -; GFX12-NEXT: s_cbranch_execnz .LBB9_1 -; GFX12-NEXT: ; %bb.2: -; GFX12-NEXT: s_mov_b32 exec_lo, s2 -; GFX12-NEXT: s_wait_loadcnt 0x0 -; GFX12-NEXT: s_setpc_b64 s[30:31] +; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__vgpr_soffset: +; GFX1200: ; %bb.0: +; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1200-NEXT: s_wait_expcnt 0x0 +; GFX1200-NEXT: s_wait_samplecnt 0x0 +; GFX1200-NEXT: s_wait_bvhcnt 0x0 +; GFX1200-NEXT: s_wait_kmcnt 0x0 +; GFX1200-NEXT: s_mov_b32 s2, exec_lo +; GFX1200-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1 +; GFX1200-NEXT: v_readfirstlane_b32 s4, v1 +; GFX1200-NEXT: v_readfirstlane_b32 s5, v2 +; GFX1200-NEXT: v_readfirstlane_b32 s6, v3 +; GFX1200-NEXT: v_readfirstlane_b32 s7, v4 +; GFX1200-NEXT: v_readfirstlane_b32 s3, v7 +; GFX1200-NEXT: s_wait_alu 0xf1ff +; GFX1200-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2] +; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1200-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4] +; GFX1200-NEXT: v_cmp_eq_u32_e64 s1, s3, v7 +; GFX1200-NEXT: s_and_b32 s0, vcc_lo, s0 +; GFX1200-NEXT: s_wait_alu 0xfffe +; GFX1200-NEXT: s_and_b32 s0, s0, s1 +; GFX1200-NEXT: s_wait_alu 0xfffe +; GFX1200-NEXT: s_and_saveexec_b32 s0, s0 +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[5:6], s[4:7], s3 idxen offen offset:256 th:TH_ATOMIC_RETURN +; GFX1200-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4 +; GFX1200-NEXT: ; implicit-def: $vgpr7 +; GFX1200-NEXT: ; implicit-def: $vgpr5_vgpr6 +; GFX1200-NEXT: s_xor_b32 exec_lo, exec_lo, s0 +; GFX1200-NEXT: s_cbranch_execnz .LBB9_1 +; GFX1200-NEXT: ; %bb.2: +; GFX1200-NEXT: s_mov_b32 exec_lo, s2 +; GFX1200-NEXT: s_wait_loadcnt 0x0 +; GFX1200-NEXT: s_setpc_b64 s[30:31] +; +; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__vgpr_soffset: +; GFX1250: ; %bb.0: +; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 +; GFX1250-NEXT: s_wait_kmcnt 0x0 +; GFX1250-NEXT: v_dual_mov_b32 v8, v5 :: v_dual_mov_b32 v5, v4 +; GFX1250-NEXT: v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v3, v2 +; GFX1250-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_add_nc_u32 v9, 0x100, v6 +; GFX1250-NEXT: s_mov_b32 s2, exec_lo +; GFX1250-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_readfirstlane_b32 s4, v2 +; GFX1250-NEXT: v_readfirstlane_b32 s5, v3 +; GFX1250-NEXT: v_readfirstlane_b32 s6, v4 +; GFX1250-NEXT: v_readfirstlane_b32 s7, v5 +; GFX1250-NEXT: v_readfirstlane_b32 s3, v7 +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3] +; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5] +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) +; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7 +; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0 +; GFX1250-NEXT: s_and_b32 s0, s0, s1 +; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-NEXT: s_and_saveexec_b32 s0, s0 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; 
GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[8:9], s[4:7], s3 idxen offen th:TH_ATOMIC_RETURN +; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5 +; GFX1250-NEXT: ; implicit-def: $vgpr7 +; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9 +; GFX1250-NEXT: s_wait_xcnt 0x0 +; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0 +; GFX1250-NEXT: s_cbranch_execnz .LBB9_1 +; GFX1250-NEXT: ; %bb.2: +; GFX1250-NEXT: s_mov_b32 exec_lo, s2 +; GFX1250-NEXT: s_wait_loadcnt 0x0 +; GFX1250-NEXT: s_set_pc_i64 s[30:31] %voffset.add = add i32 %voffset, 256 %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0) ret float %ret diff --git a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll index 42401af..8304be9 100644 --- a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll +++ b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll @@ -78,12 +78,14 @@ define amdgpu_kernel void @fadd_v2_vs(ptr addrspace(1) %a, <2 x float> %x) { ; GFX1250-LABEL: fadd_v2_vs: ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3] -; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset ; GFX1250-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -142,13 +144,16 @@ define amdgpu_kernel void @fadd_v4_vs(ptr addrspace(1) %a, <4 x float> %x) { ; GFX1250-SDAG-NEXT: s_clause 0x1 ; GFX1250-SDAG-NEXT: s_load_b64 s[6:7], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x34 -; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-SDAG-NEXT: v_and_b32_e32 v8, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset +; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[6:7], s[0:1] +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[2:3], s[2:3] -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[0:1] -; GFX1250-SDAG-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[4:5] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[6:7] +; GFX1250-SDAG-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fadd_v4_vs: @@ -156,13 +161,16 @@ define amdgpu_kernel void @fadd_v4_vs(ptr addrspace(1) %a, <4 x float> %x) { ; GFX1250-GISEL-NEXT: s_clause 0x1 ; GFX1250-GISEL-NEXT: s_load_b64 s[6:7], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x34 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v8, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v4, s[6:7] 
scale_offset +; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[0:1] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[0:1] -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[2:3], v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[4:5] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[6:7] +; GFX1250-GISEL-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <4 x float>, ptr addrspace(1) %a, i32 %id @@ -332,56 +340,69 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; ; GFX1250-SDAG-LABEL: fadd_v32_vs: ; GFX1250-SDAG: ; %bb.0: -; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-SDAG-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v32, 7, v0 +; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v40, 7, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v32, s[0:1] offset:16 -; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v32, s[0:1] -; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v32, s[0:1] offset:48 -; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v32, s[0:1] offset:32 -; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v32, s[0:1] offset:80 -; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v32, s[0:1] offset:64 -; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v32, s[0:1] offset:112 -; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v32, s[0:1] offset:96 -; GFX1250-SDAG-NEXT: s_clause 0x1 -; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xa4 -; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xe4 -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7 +; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v40, s[34:35] offset:16 +; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v40, s[34:35] offset:48 +; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v40, s[34:35] offset:32 +; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v40, s[34:35] +; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v40, s[34:35] offset:80 +; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v40, s[34:35] offset:96 +; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v40, s[34:35] offset:64 +; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v40, s[34:35] offset:112 +; GFX1250-SDAG-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 +; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 +; GFX1250-SDAG-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[12:13] -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[2:3], s[14:15] +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s20 :: v_dual_mov_b32 v35, s21 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s22 :: v_dual_mov_b32 v39, s23 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s18 :: v_dual_mov_b32 v37, s29 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s30 :: v_dual_mov_b32 v43, s31 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v44, s24 :: v_dual_mov_b32 v33, s19 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s28 :: v_dual_mov_b32 v57, s15 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s3 :: v_dual_mov_b32 v54, s12 +; 
GFX1250-SDAG-NEXT: v_dual_mov_b32 v55, s13 :: v_dual_mov_b32 v56, s14 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s7 :: v_dual_mov_b32 v52, s2 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s27 :: v_dual_mov_b32 v48, s4 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s5 :: v_dual_mov_b32 v50, s6 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s25 :: v_dual_mov_b32 v46, s26 +; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7 +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[28:29], v[28:29], v[34:35] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[30:31], v[30:31], v[38:39] +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s8 :: v_dual_mov_b32 v35, s9 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s10 :: v_dual_mov_b32 v39, s11 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x6 -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[6:7], v[6:7], s[10:11] -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x4 -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[20:21], v[20:21], s[16:17] -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x3 -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[12:13], v[12:13], s[40:41] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[26:27], v[26:27], v[42:43] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[0:1] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[24:25], v[24:25], v[36:37] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[16:17] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x2 -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[18:19], v[18:19], s[38:39] -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x1 -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[24:25], v[24:25], s[48:49] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[16:17], v[16:17], v[34:35] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[18:19], v[18:19], v[38:39] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[28:29], v[28:29], s[44:45] -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[30:31], v[30:31], s[46:47] -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[26:27], v[26:27], s[50:51] -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[16:17], v[16:17], s[36:37] -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[14:15], v[14:15], s[42:43] -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[22:23], v[22:23], s[18:19] -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[8:9], v[8:9], s[20:21] -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[10:11], v[10:11], s[22:23] -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[4:5], v[4:5], s[8:9] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[12:13], v[12:13], v[54:55] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[14:15], v[14:15], v[56:57] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[10:11], v[10:11], v[52:53] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[8:9], v[8:9], v[42:43] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[4:5], v[4:5], v[48:49] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[6:7], v[6:7], v[50:51] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[20:21], v[20:21], v[44:45] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[22:23], v[22:23], v[46:47] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[32:33] +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[36:37] ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[28:31], s[0:1] offset:96 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[24:27], s[0:1] offset:112 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[16:19], s[0:1] offset:64 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[12:15], s[0:1] offset:80 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[20:23], s[0:1] offset:32 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[8:11], s[0:1] offset:48 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[4:7], s[0:1] -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[0:3], s[0:1] offset:16 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[16:19], s[34:35] offset:96 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[12:15], s[34:35] offset:112 +; GFX1250-SDAG-NEXT: 
global_store_b128 v40, v[8:11], s[34:35] offset:64 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[4:7], s[34:35] offset:80 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[20:23], s[34:35] offset:32 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[24:27], s[34:35] offset:48 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[0:3], s[34:35] +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[28:31], s[34:35] offset:16 ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fadd_v32_vs: @@ -389,54 +410,70 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; GFX1250-GISEL-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v32, 7, v0 +; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v32, s[34:35] -; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v32, s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v32, s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v32, s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v32, s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v32, s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v32, s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v32, s[34:35] offset:112 +; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[34:35] +; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:16 +; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32 +; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[34:35] offset:48 +; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:64 +; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:80 +; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:96 +; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112 ; GFX1250-GISEL-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 -; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[16:17] -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[2:3], v[2:3], s[18:19] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[24:25] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[26:27] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[28:29] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[30:31] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[0:1] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[4:5] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[6:7] +; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7 +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[32:33] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[34:35] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[8:9] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[10:11] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x6 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[4:5], v[4:5], s[20:21] -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[6:7], v[6:7], s[22:23] +; GFX1250-GISEL-NEXT: 
v_pk_add_f32 v[4:5], v[4:5], v[36:37] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[6:7], v[6:7], v[38:39] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[12:13] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[14:15] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x5 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[8:9], v[8:9], s[24:25] -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[10:11], v[10:11], s[26:27] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[8:9], v[8:9], v[40:41] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[10:11], v[10:11], v[42:43] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x4 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[12:13], v[12:13], s[28:29] -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[14:15], v[14:15], s[30:31] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[12:13], v[12:13], v[44:45] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[14:15], v[14:15], v[46:47] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x3 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[16:17], v[16:17], s[0:1] -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[18:19], v[18:19], s[2:3] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[16:17], v[16:17], v[48:49] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[18:19], v[18:19], v[50:51] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x2 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[20:21], v[20:21], s[4:5] -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[22:23], v[22:23], s[6:7] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[20:21], v[20:21], v[52:53] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[22:23], v[22:23], v[54:55] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x1 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[24:25], v[24:25], s[8:9] -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[26:27], v[26:27], s[10:11] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[24:25], v[24:25], v[32:33] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[26:27], v[26:27], v[34:35] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[28:29], v[28:29], s[12:13] -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[30:31], v[30:31], s[14:15] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[28:29], v[28:29], v[36:37] +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[30:31], v[30:31], v[38:39] ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[0:3], s[34:35] -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[4:7], s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[8:11], s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[12:15], s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[16:19], s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[20:23], s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[24:27], s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[28:31], s[34:35] offset:112 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[34:35] +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:16 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[34:35] offset:48 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:64 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:80 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:96 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112 ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <32 x float>, ptr addrspace(1) %a, i32 %id @@ -502,15 +539,16 @@ define amdgpu_kernel void @fadd_v2_v_imm(ptr addrspace(1) %a) { ; GFX1250-GISEL-LABEL: fadd_v2_v_imm: ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], 
s[4:5], 0x24 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_mov_b32 s2, 0x42c80000 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2 +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -645,15 +683,16 @@ define amdgpu_kernel void @fadd_v2_v_lit_splat(ptr addrspace(1) %a) { ; GFX1250-GISEL-LABEL: fadd_v2_v_lit_splat: ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_mov_b32 s2, 1.0 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2 +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -703,13 +742,15 @@ define amdgpu_kernel void @fadd_v2_v_lit_hi0(ptr addrspace(1) %a) { ; GFX1250-GISEL-LABEL: fadd_v2_v_lit_hi0: ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], 0x3f800000 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -746,17 +787,31 @@ define amdgpu_kernel void @fadd_v2_v_lit_lo0(ptr addrspace(1) %a) { ; PACKED-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] ; PACKED-NEXT: 
s_endpgm ; -; GFX1250-LABEL: fadd_v2_v_lit_lo0: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0 -; GFX1250-NEXT: s_mov_b64 s[2:3], lit64(0x3f80000000000000) -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset -; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3] -; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset -; GFX1250-NEXT: s_endpgm +; GFX1250-SDAG-LABEL: fadd_v2_v_lit_lo0: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x3f80000000000000) +; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: fadd_v2_v_lit_lo0: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x3f80000000000000) +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] +; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset +; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id %load = load <2 x float>, ptr addrspace(1) %gep, align 8 @@ -792,17 +847,31 @@ define amdgpu_kernel void @fadd_v2_v_unfoldable_lit(ptr addrspace(1) %a) { ; PACKED-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] ; PACKED-NEXT: s_endpgm ; -; GFX1250-LABEL: fadd_v2_v_unfoldable_lit: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0 -; GFX1250-NEXT: s_mov_b64 s[2:3], lit64(0x400000003f800000) -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset -; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3] -; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset -; GFX1250-NEXT: s_endpgm +; GFX1250-SDAG-LABEL: fadd_v2_v_unfoldable_lit: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x400000003f800000) +; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: fadd_v2_v_unfoldable_lit: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x400000003f800000) +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] +; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 +; 
GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset +; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id %load = load <2 x float>, ptr addrspace(1) %gep, align 8 @@ -1085,12 +1154,14 @@ define amdgpu_kernel void @fadd_v2_v_fneg_lo2(ptr addrspace(1) %a, float %x, flo ; GFX1250-SDAG-LABEL: fadd_v2_v_fneg_lo2: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-SDAG-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3] neg_lo:[0,1] -; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] neg_lo:[0,1] +; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fadd_v2_v_fneg_lo2: @@ -1159,12 +1230,14 @@ define amdgpu_kernel void @fadd_v2_v_fneg_hi2(ptr addrspace(1) %a, float %x, flo ; GFX1250-SDAG-LABEL: fadd_v2_v_fneg_hi2: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-SDAG-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3] op_sel:[0,1] op_sel_hi:[1,0] neg_hi:[0,1] -; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] op_sel:[0,1] op_sel_hi:[1,0] neg_hi:[0,1] +; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fadd_v2_v_fneg_hi2: @@ -1262,12 +1335,14 @@ define amdgpu_kernel void @fmul_v2_vs(ptr addrspace(1) %a, <2 x float> %x) { ; GFX1250-LABEL: fmul_v2_vs: ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[2:3] -; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset ; GFX1250-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -1326,13 +1401,16 @@ define amdgpu_kernel void 
@fmul_v4_vs(ptr addrspace(1) %a, <4 x float> %x) { ; GFX1250-SDAG-NEXT: s_clause 0x1 ; GFX1250-SDAG-NEXT: s_load_b64 s[6:7], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x34 -; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-SDAG-NEXT: v_and_b32_e32 v8, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset +; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[6:7], s[0:1] +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[2:3], v[2:3], s[2:3] -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[0:1] -; GFX1250-SDAG-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[4:5] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[6:7] +; GFX1250-SDAG-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fmul_v4_vs: @@ -1340,13 +1418,16 @@ define amdgpu_kernel void @fmul_v4_vs(ptr addrspace(1) %a, <4 x float> %x) { ; GFX1250-GISEL-NEXT: s_clause 0x1 ; GFX1250-GISEL-NEXT: s_load_b64 s[6:7], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x34 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v8, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset +; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[0:1] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[0:1] -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[2:3], v[2:3], s[2:3] -; GFX1250-GISEL-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[4:5] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[6:7] +; GFX1250-GISEL-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <4 x float>, ptr addrspace(1) %a, i32 %id @@ -1516,56 +1597,69 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; ; GFX1250-SDAG-LABEL: fmul_v32_vs: ; GFX1250-SDAG: ; %bb.0: -; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-SDAG-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v32, 7, v0 +; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v40, 7, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v32, s[0:1] offset:16 -; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v32, s[0:1] -; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v32, s[0:1] offset:48 -; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v32, s[0:1] offset:32 -; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v32, s[0:1] offset:80 -; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v32, s[0:1] offset:64 -; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v32, s[0:1] offset:112 -; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], 
v32, s[0:1] offset:96 -; GFX1250-SDAG-NEXT: s_clause 0x1 -; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xa4 -; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xe4 -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7 +; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v40, s[34:35] offset:16 +; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v40, s[34:35] offset:48 +; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v40, s[34:35] offset:32 +; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v40, s[34:35] +; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v40, s[34:35] offset:80 +; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v40, s[34:35] offset:96 +; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v40, s[34:35] offset:64 +; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v40, s[34:35] offset:112 +; GFX1250-SDAG-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 +; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 +; GFX1250-SDAG-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[12:13] -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[2:3], v[2:3], s[14:15] +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s20 :: v_dual_mov_b32 v35, s21 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s22 :: v_dual_mov_b32 v39, s23 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s18 :: v_dual_mov_b32 v37, s29 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s30 :: v_dual_mov_b32 v43, s31 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v44, s24 :: v_dual_mov_b32 v33, s19 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s28 :: v_dual_mov_b32 v57, s15 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s3 :: v_dual_mov_b32 v54, s12 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v55, s13 :: v_dual_mov_b32 v56, s14 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s7 :: v_dual_mov_b32 v52, s2 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s27 :: v_dual_mov_b32 v48, s4 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s5 :: v_dual_mov_b32 v50, s6 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s25 :: v_dual_mov_b32 v46, s26 +; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7 +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[28:29], v[28:29], v[34:35] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[30:31], v[30:31], v[38:39] +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s8 :: v_dual_mov_b32 v35, s9 +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s10 :: v_dual_mov_b32 v39, s11 ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x6 -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[6:7], v[6:7], s[10:11] -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x4 -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[20:21], v[20:21], s[16:17] -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x3 -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[12:13], v[12:13], s[40:41] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[26:27], v[26:27], v[42:43] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[0:1] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[24:25], v[24:25], v[36:37] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[16:17] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x2 -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[18:19], v[18:19], s[38:39] -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x1 -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[24:25], v[24:25], s[48:49] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[16:17], v[16:17], v[34:35] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[18:19], v[18:19], v[38:39] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[28:29], v[28:29], s[44:45] -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[30:31], v[30:31], s[46:47] -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[26:27], v[26:27], s[50:51] -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[16:17], v[16:17], s[36:37] -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[14:15], v[14:15], s[42:43] -; GFX1250-SDAG-NEXT: v_pk_mul_f32 
v[22:23], v[22:23], s[18:19] -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[8:9], v[8:9], s[20:21] -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[10:11], v[10:11], s[22:23] -; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[4:5], v[4:5], s[8:9] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[12:13], v[12:13], v[54:55] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[14:15], v[14:15], v[56:57] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[10:11], v[10:11], v[52:53] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[8:9], v[8:9], v[42:43] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[4:5], v[4:5], v[48:49] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[6:7], v[6:7], v[50:51] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[20:21], v[20:21], v[44:45] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[22:23], v[22:23], v[46:47] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[32:33] +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[36:37] ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[28:31], s[0:1] offset:96 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[24:27], s[0:1] offset:112 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[16:19], s[0:1] offset:64 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[12:15], s[0:1] offset:80 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[20:23], s[0:1] offset:32 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[8:11], s[0:1] offset:48 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[4:7], s[0:1] -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[0:3], s[0:1] offset:16 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[16:19], s[34:35] offset:96 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[12:15], s[34:35] offset:112 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[8:11], s[34:35] offset:64 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[4:7], s[34:35] offset:80 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[20:23], s[34:35] offset:32 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[24:27], s[34:35] offset:48 +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[0:3], s[34:35] +; GFX1250-SDAG-NEXT: global_store_b128 v40, v[28:31], s[34:35] offset:16 ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fmul_v32_vs: @@ -1573,54 +1667,70 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; GFX1250-GISEL-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v32, 7, v0 +; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v32, s[34:35] -; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v32, s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v32, s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v32, s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v32, s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v32, s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v32, s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v32, s[34:35] offset:112 +; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[34:35] +; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:16 +; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32 +; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[34:35] offset:48 +; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:64 +; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:80 +; GFX1250-GISEL-NEXT: 
global_load_b128 v[24:27], v56, s[34:35] offset:96 +; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112 ; GFX1250-GISEL-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 -; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[16:17] -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[2:3], v[2:3], s[18:19] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[24:25] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[26:27] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[28:29] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[30:31] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[0:1] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[4:5] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[6:7] +; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7 +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[32:33] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[34:35] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[8:9] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[10:11] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x6 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[4:5], v[4:5], s[20:21] -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[6:7], v[6:7], s[22:23] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[4:5], v[4:5], v[36:37] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[6:7], v[6:7], v[38:39] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[12:13] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[14:15] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x5 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[8:9], v[8:9], s[24:25] -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[10:11], v[10:11], s[26:27] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[8:9], v[8:9], v[40:41] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[10:11], v[10:11], v[42:43] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x4 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[12:13], v[12:13], s[28:29] -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[14:15], v[14:15], s[30:31] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[12:13], v[12:13], v[44:45] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[14:15], v[14:15], v[46:47] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x3 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[16:17], v[16:17], s[0:1] -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[18:19], v[18:19], s[2:3] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[16:17], v[16:17], v[48:49] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[18:19], v[18:19], v[50:51] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x2 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[20:21], v[20:21], s[4:5] -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[22:23], v[22:23], s[6:7] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[20:21], v[20:21], v[52:53] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[22:23], v[22:23], v[54:55] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x1 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[24:25], v[24:25], s[8:9] -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[26:27], v[26:27], s[10:11] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[24:25], v[24:25], v[32:33] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[26:27], v[26:27], v[34:35] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[28:29], v[28:29], s[12:13] -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[30:31], v[30:31], s[14:15] +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[28:29], v[28:29], v[36:37] +; 
GFX1250-GISEL-NEXT: v_pk_mul_f32 v[30:31], v[30:31], v[38:39] ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[0:3], s[34:35] -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[4:7], s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[8:11], s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[12:15], s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[16:19], s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[20:23], s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[24:27], s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[28:31], s[34:35] offset:112 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[34:35] +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:16 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[34:35] offset:48 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:64 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:80 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:96 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112 ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <32 x float>, ptr addrspace(1) %a, i32 %id @@ -1685,15 +1795,16 @@ define amdgpu_kernel void @fmul_v2_v_imm(ptr addrspace(1) %a) { ; GFX1250-GISEL-LABEL: fmul_v2_v_imm: ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_mov_b32 s2, 0x42c80000 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2 +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[2:3] -; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -1828,15 +1939,16 @@ define amdgpu_kernel void @fmul_v2_v_lit_splat(ptr addrspace(1) %a) { ; GFX1250-GISEL-LABEL: fmul_v2_v_lit_splat: ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_mov_b32 s2, 4.0 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) ; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2 +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[2:3] -; 
GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -1873,17 +1985,31 @@ define amdgpu_kernel void @fmul_v2_v_unfoldable_lit(ptr addrspace(1) %a) { ; PACKED-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] ; PACKED-NEXT: s_endpgm ; -; GFX1250-LABEL: fmul_v2_v_unfoldable_lit: -; GFX1250: ; %bb.0: -; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0 -; GFX1250-NEXT: s_mov_b64 s[2:3], lit64(0x4040000040800000) -; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset -; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[2:3] -; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset -; GFX1250-NEXT: s_endpgm +; GFX1250-SDAG-LABEL: fmul_v2_v_unfoldable_lit: +; GFX1250-SDAG: ; %bb.0: +; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4040000040800000) +; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 +; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 +; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset +; GFX1250-SDAG-NEXT: s_endpgm +; +; GFX1250-GISEL-LABEL: fmul_v2_v_unfoldable_lit: +; GFX1250-GISEL: ; %bb.0: +; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x4040000040800000) +; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] +; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 +; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset +; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id %load = load <2 x float>, ptr addrspace(1) %gep, align 8 @@ -2040,12 +2166,14 @@ define amdgpu_kernel void @fma_v2_vs(ptr addrspace(1) %a, <2 x float> %x) { ; GFX1250-LABEL: fma_v2_vs: ; GFX1250: ; %bb.0: ; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-NEXT: v_and_b32_e32 v4, 0x3ff, v0 ; GFX1250-NEXT: s_wait_kmcnt 0x0 -; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset +; GFX1250-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-NEXT: s_wait_loadcnt 0x0 -; GFX1250-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[2:3], s[2:3] -; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[2:3], v[2:3] +; GFX1250-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset ; GFX1250-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -2104,13 +2232,16 @@ define amdgpu_kernel void @fma_v4_vs(ptr addrspace(1) %a, <4 x float> %x) { ; GFX1250-SDAG-NEXT: 
s_clause 0x1 ; GFX1250-SDAG-NEXT: s_load_b64 s[6:7], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x34 -; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-SDAG-NEXT: v_and_b32_e32 v8, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset +; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[4:5], s[2:3] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[6:7], s[0:1] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[2:3], v[2:3], s[2:3], s[2:3] -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[0:1], s[0:1] -; GFX1250-SDAG-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[4:5], v[4:5] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[6:7], v[6:7] +; GFX1250-SDAG-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fma_v4_vs: @@ -2118,13 +2249,16 @@ define amdgpu_kernel void @fma_v4_vs(ptr addrspace(1) %a, <4 x float> %x) { ; GFX1250-GISEL-NEXT: s_clause 0x1 ; GFX1250-GISEL-NEXT: s_load_b64 s[6:7], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x34 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v8, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset +; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[0:1] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[2:3] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[0:1], s[0:1] -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[2:3], v[2:3], s[2:3], s[2:3] -; GFX1250-GISEL-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[4:5], v[4:5] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[6:7], v[6:7] +; GFX1250-GISEL-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <4 x float>, ptr addrspace(1) %a, i32 %id @@ -2294,56 +2428,68 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; ; GFX1250-SDAG-LABEL: fma_v32_vs: ; GFX1250-SDAG: ; %bb.0: -; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX1250-SDAG-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v32, 7, v0 +; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v34, 7, v0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v32, s[0:1] offset:16 -; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v32, s[0:1] -; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v32, s[0:1] offset:48 -; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v32, s[0:1] offset:32 -; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v32, s[0:1] offset:80 -; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v32, s[0:1] offset:64 -; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v32, s[0:1] offset:112 -; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v32, s[0:1] offset:96 -; 
GFX1250-SDAG-NEXT: s_clause 0x1 -; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xa4 -; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xe4 -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7 +; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v34, s[34:35] offset:16 +; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v34, s[34:35] offset:48 +; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v34, s[34:35] offset:32 +; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v34, s[34:35] +; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v34, s[34:35] offset:80 +; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v34, s[34:35] offset:96 +; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v34, s[34:35] offset:64 +; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v34, s[34:35] offset:112 +; GFX1250-SDAG-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 +; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 +; GFX1250-SDAG-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[12:13], s[12:13] -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[2:3], v[2:3], s[14:15], s[14:15] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[20:21] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[22:23] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[30:31] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[28:29] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[54:55], s[12:13] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[56:57], s[14:15] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[52:53], s[2:3] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[48:49], s[4:5] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[50:51], s[6:7] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[44:45], s[24:25] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[46:47], s[26:27] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[32:33], s[18:19] +; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7 +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[28:29], v[28:29], v[36:37], v[36:37] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[30:31], v[30:31], v[38:39], v[38:39] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[8:9] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[10:11] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x6 -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[6:7], v[6:7], s[10:11], s[10:11] -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x4 -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[20:21], v[20:21], s[16:17], s[16:17] -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x3 -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[12:13], v[12:13], s[40:41], s[40:41] -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x2 -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[18:19], v[18:19], s[38:39], s[38:39] -; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x1 -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[24:25], v[24:25], s[48:49], s[48:49] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[26:27], v[26:27], v[42:43], v[42:43] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[0:1] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[24:25], v[24:25], v[40:41], v[40:41] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[16:17] ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[28:29], v[28:29], s[44:45], s[44:45] -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[30:31], v[30:31], s[46:47], s[46:47] -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[26:27], v[26:27], s[50:51], s[50:51] -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[16:17], v[16:17], s[36:37], s[36:37] -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[14:15], v[14:15], s[42:43], s[42:43] -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[22:23], v[22:23], s[18:19], s[18:19] -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[8:9], v[8:9], s[20:21], s[20:21] -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[10:11], v[10:11], s[22:23], s[22:23] -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[4:5], v[4:5], s[8:9], 
s[8:9] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[12:13], v[12:13], v[54:55], v[54:55] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[16:17], v[16:17], v[36:37], v[36:37] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[18:19], v[18:19], v[38:39], v[38:39] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[14:15], v[14:15], v[56:57], v[56:57] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[10:11], v[10:11], v[52:53], v[52:53] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[8:9], v[8:9], v[42:43], v[42:43] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[4:5], v[4:5], v[48:49], v[48:49] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[6:7], v[6:7], v[50:51], v[50:51] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[20:21], v[20:21], v[44:45], v[44:45] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[22:23], v[22:23], v[46:47], v[46:47] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[32:33], v[32:33] +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[40:41], v[40:41] ; GFX1250-SDAG-NEXT: s_clause 0x7 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[28:31], s[0:1] offset:96 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[24:27], s[0:1] offset:112 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[16:19], s[0:1] offset:64 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[12:15], s[0:1] offset:80 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[20:23], s[0:1] offset:32 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[8:11], s[0:1] offset:48 -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[4:7], s[0:1] -; GFX1250-SDAG-NEXT: global_store_b128 v32, v[0:3], s[0:1] offset:16 +; GFX1250-SDAG-NEXT: global_store_b128 v34, v[16:19], s[34:35] offset:96 +; GFX1250-SDAG-NEXT: global_store_b128 v34, v[12:15], s[34:35] offset:112 +; GFX1250-SDAG-NEXT: global_store_b128 v34, v[8:11], s[34:35] offset:64 +; GFX1250-SDAG-NEXT: global_store_b128 v34, v[4:7], s[34:35] offset:80 +; GFX1250-SDAG-NEXT: global_store_b128 v34, v[20:23], s[34:35] offset:32 +; GFX1250-SDAG-NEXT: global_store_b128 v34, v[24:27], s[34:35] offset:48 +; GFX1250-SDAG-NEXT: global_store_b128 v34, v[0:3], s[34:35] +; GFX1250-SDAG-NEXT: global_store_b128 v34, v[28:31], s[34:35] offset:16 ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fma_v32_vs: @@ -2351,54 +2497,70 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) { ; GFX1250-GISEL-NEXT: s_load_b64 s[34:35], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v32, 7, v0 +; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v32, s[34:35] -; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v32, s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v32, s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v32, s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v32, s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v32, s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v32, s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v32, s[34:35] offset:112 +; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[34:35] +; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:16 +; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32 +; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[34:35] offset:48 +; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:64 +; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[34:35] 
offset:80 +; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:96 +; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112 ; GFX1250-GISEL-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4 -; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[16:17], s[16:17] -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[2:3], v[2:3], s[18:19], s[18:19] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[24:25] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[26:27] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[28:29] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[30:31] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[0:1] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[4:5] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[6:7] +; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7 +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[32:33], v[32:33] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[34:35], v[34:35] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[8:9] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[10:11] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x6 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[4:5], v[4:5], s[20:21], s[20:21] -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[6:7], v[6:7], s[22:23], s[22:23] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[4:5], v[4:5], v[36:37], v[36:37] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[6:7], v[6:7], v[38:39], v[38:39] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[12:13] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[14:15] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x5 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[8:9], v[8:9], s[24:25], s[24:25] -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[10:11], v[10:11], s[26:27], s[26:27] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[8:9], v[8:9], v[40:41], v[40:41] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[10:11], v[10:11], v[42:43], v[42:43] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x4 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[12:13], v[12:13], s[28:29], s[28:29] -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[14:15], v[14:15], s[30:31], s[30:31] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[12:13], v[12:13], v[44:45], v[44:45] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[14:15], v[14:15], v[46:47], v[46:47] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x3 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[16:17], v[16:17], s[0:1], s[0:1] -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[18:19], v[18:19], s[2:3], s[2:3] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[16:17], v[16:17], v[48:49], v[48:49] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[18:19], v[18:19], v[50:51], v[50:51] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x2 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[20:21], v[20:21], s[4:5], s[4:5] -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[22:23], v[22:23], s[6:7], s[6:7] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[20:21], v[20:21], v[52:53], v[52:53] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[22:23], v[22:23], v[54:55], v[54:55] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x1 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[24:25], v[24:25], s[8:9], s[8:9] -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[26:27], v[26:27], s[10:11], s[10:11] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[24:25], v[24:25], v[32:33], v[32:33] +; GFX1250-GISEL-NEXT: 
v_pk_fma_f32 v[26:27], v[26:27], v[34:35], v[34:35] ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[28:29], v[28:29], s[12:13], s[12:13] -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[30:31], v[30:31], s[14:15], s[14:15] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[28:29], v[28:29], v[36:37], v[36:37] +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[30:31], v[30:31], v[38:39], v[38:39] ; GFX1250-GISEL-NEXT: s_clause 0x7 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[0:3], s[34:35] -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[4:7], s[34:35] offset:16 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[8:11], s[34:35] offset:32 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[12:15], s[34:35] offset:48 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[16:19], s[34:35] offset:64 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[20:23], s[34:35] offset:80 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[24:27], s[34:35] offset:96 -; GFX1250-GISEL-NEXT: global_store_b128 v32, v[28:31], s[34:35] offset:112 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[34:35] +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:16 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[34:35] offset:48 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:64 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:80 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:96 +; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112 ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <32 x float>, ptr addrspace(1) %a, i32 %id @@ -2488,17 +2650,19 @@ define amdgpu_kernel void @fma_v2_v_imm(ptr addrspace(1) %a) { ; GFX1250-GISEL-LABEL: fma_v2_v_imm: ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v6, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_mov_b32 s2, 0x42c80000 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_mov_b32 s4, 0x43480000 ; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2 ; GFX1250-GISEL-NEXT: s_mov_b32 s5, s4 +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[4:5] ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v6, s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[2:3], s[4:5] -; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[2:3], v[4:5] +; GFX1250-GISEL-NEXT: global_store_b64 v6, v[0:1], s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -2653,17 +2817,19 @@ define amdgpu_kernel void @fma_v2_v_lit_splat(ptr addrspace(1) %a) { ; GFX1250-GISEL-LABEL: fma_v2_v_lit_splat: ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v6, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_mov_b32 s2, 4.0 ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_mov_b32 s4, 1.0 ; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2 ; GFX1250-GISEL-NEXT: s_mov_b32 s5, s4 +; 
GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[4:5] ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v6, s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[2:3], s[4:5] -; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[2:3], v[4:5] +; GFX1250-GISEL-NEXT: global_store_b64 v6, v[0:1], s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -2740,29 +2906,30 @@ define amdgpu_kernel void @fma_v2_v_unfoldable_lit(ptr addrspace(1) %a) { ; GFX1250-SDAG-LABEL: fma_v2_v_unfoldable_lit: ; GFX1250-SDAG: ; %bb.0: ; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-SDAG-NEXT: v_and_b32_e32 v2, 0x3ff, v0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[2:3], lit64(0x400000003f800000) -; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 -; GFX1250-SDAG-NEXT: s_mov_b64 s[4:5], lit64(0x4040000040800000) +; GFX1250-SDAG-NEXT: v_and_b32_e32 v6, 0x3ff, v0 +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4040000040800000) +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[4:5], lit64(0x400000003f800000) ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v6, s[0:1] scale_offset ; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0 -; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[4:5], s[2:3] -; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[2:3], v[4:5] +; GFX1250-SDAG-NEXT: global_store_b64 v6, v[0:1], s[0:1] scale_offset ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fma_v2_v_unfoldable_lit: ; GFX1250-GISEL: ; %bb.0: ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 -; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0 +; GFX1250-GISEL-NEXT: v_and_b32_e32 v6, 0x3ff, v0 ; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x4040000040800000) ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_mov_b64 s[4:5], lit64(0x400000003f800000) +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[4:5] ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset +; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v6, s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[2:3], s[4:5] -; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset +; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[2:3], v[4:5] +; GFX1250-GISEL-NEXT: global_store_b64 v6, v[0:1], s[0:1] scale_offset ; GFX1250-GISEL-NEXT: s_endpgm %id = tail call i32 @llvm.amdgcn.workitem.id.x() %gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id @@ -3268,20 +3435,22 @@ define amdgpu_kernel void @fadd_fadd_fsub_0(<2 x float> %arg) { ; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_3) ; GFX1250-SDAG-NEXT: s_add_f32 s1, s1, 0 ; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 -; GFX1250-SDAG-NEXT: flat_store_b64 v[0:1], v[0:1] +; GFX1250-SDAG-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fadd_fadd_fsub_0: ; 
GFX1250-GISEL: ; %bb.0: ; %bb ; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], s[0:1], 0 +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1] +; GFX1250-GISEL-NEXT: v_mov_b32_e32 v2, s0 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], 0 +; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, v1 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v0, v1 ; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], 0 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1250-GISEL-NEXT: v_mov_b32_e32 v3, v0 -; GFX1250-GISEL-NEXT: flat_store_b64 v[0:1], v[2:3] +; GFX1250-GISEL-NEXT: flat_store_b64 v[0:1], v[2:3] scope:SCOPE_SE ; GFX1250-GISEL-NEXT: s_endpgm bb: %i12 = fadd <2 x float> zeroinitializer, %arg @@ -3363,15 +3532,16 @@ define amdgpu_kernel void @fadd_fadd_fsub(<2 x float> %arg, <2 x float> %arg1, p ; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 ; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0 ; GFX1250-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 -; GFX1250-SDAG-NEXT: v_mov_b32_e32 v4, 0 ; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0 -; GFX1250-SDAG-NEXT: s_add_f32 s6, s1, s3 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], s[2:3], s[6:7] op_sel_hi:[1,0] -; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, v0 -; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) -; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[2:3], s[2:3] neg_lo:[0,1] neg_hi:[0,1] -; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[4:5] +; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[0:1], s[2:3] +; GFX1250-SDAG-NEXT: s_add_f32 s2, s1, s3 +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_3) +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[0:1], s[2:3] op_sel_hi:[1,0] +; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) +; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, v2 +; GFX1250-SDAG-NEXT: v_mov_b32_e32 v2, 0 +; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[4:5], v[0:1] neg_lo:[0,1] neg_hi:[0,1] +; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[4:5] ; GFX1250-SDAG-NEXT: s_endpgm ; ; GFX1250-GISEL-LABEL: fadd_fadd_fsub: @@ -3380,13 +3550,16 @@ define amdgpu_kernel void @fadd_fadd_fsub(<2 x float> %arg, <2 x float> %arg1, p ; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0 ; GFX1250-GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], s[0:1], s[2:3] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3] ; GFX1250-GISEL-NEXT: s_sub_f32 s0, s0, s2 -; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_3) -; GFX1250-GISEL-NEXT: v_dual_mov_b32 v0, v1 :: v_dual_mov_b32 v2, s0 ; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) -; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], s[2:3], v[0:1] -; GFX1250-GISEL-NEXT: v_dual_subrev_f32 v3, s3, v0 :: v_dual_mov_b32 v0, 0 +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] +; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, v1 +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[2:3], v[0:1] +; GFX1250-GISEL-NEXT: 
v_dual_mov_b32 v2, s0 :: v_dual_subrev_f32 v3, s3, v0 +; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, 0 ; GFX1250-GISEL-NEXT: global_store_b64 v0, v[2:3], s[4:5] ; GFX1250-GISEL-NEXT: s_endpgm bb: @@ -3593,7 +3766,9 @@ define amdgpu_kernel void @fneg_v2f32_scalar(ptr addrspace(1) %a, <2 x float> %x ; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 ; GFX1250-GISEL-NEXT: v_mov_b32_e32 v2, 0 ; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0 -; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], 1.0, s[2:3] op_sel_hi:[0,1] neg_lo:[0,1] neg_hi:[0,1] +; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3] +; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], 1.0, v[0:1] op_sel_hi:[0,1] neg_lo:[0,1] neg_hi:[0,1] ; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] ; GFX1250-GISEL-NEXT: s_endpgm %fneg = fsub <2 x float> <float -0.0, float -0.0>, %x diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_err.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_err.s new file mode 100644 index 0000000..1ea64de --- /dev/null +++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_err.s @@ -0,0 +1,74 @@ +// NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5 +// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX12-ERR --implicit-check-not=error: --strict-whitespace %s + +v_pk_fma_f32 v[8:9], s[0:1], v[0:1], v[4:5] +// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand + +v_pk_fma_f32 v[8:9], v[0:1], s[0:1], v[4:5] +// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand + +v_pk_fma_f32 v[8:9], v[0:1], v[4:5], s[0:1] +// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand + +v_pk_fma_f32 v[8:9], s[0:1], v[0:1], v[4:5] op_sel:[1,0,0] op_sel_hi:[0,0,0] +// GFX12-ERR: :[[@LINE-1]]:45: error: invalid op_sel operand + +v_pk_fma_f32 v[8:9], s[0:1], v[0:1], v[4:5] op_sel:[1,0,0] op_sel_hi:[1,0,0] +// GFX12-ERR: :[[@LINE-1]]:45: error: invalid op_sel operand + +v_pk_fma_f32 v[8:9], v[0:1], s[0:1], v[4:5] op_sel:[0,1,0] op_sel_hi:[0,0,0] +// GFX12-ERR: :[[@LINE-1]]:45: error: invalid op_sel operand + +v_pk_fma_f32 v[8:9], v[0:1], v[4:5], s[0:1] op_sel:[0,0,1] op_sel_hi:[0,0,0] +// GFX12-ERR: :[[@LINE-1]]:45: error: invalid op_sel operand + +v_pk_mul_f32 v[8:9], s[0:1], v[0:1] +// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand + +v_pk_mul_f32 v[8:9], v[0:1], s[0:1] +// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand + +v_pk_mul_f32 v[8:9], s[0:1], v[0:1] op_sel:[1,0] op_sel_hi:[0,0] +// GFX12-ERR: :[[@LINE-1]]:37: error: invalid op_sel operand + +v_pk_mul_f32 v[8:9], v[0:1], s[0:1] op_sel:[0,1] op_sel_hi:[0,0] +// GFX12-ERR: :[[@LINE-1]]:37: error: invalid op_sel operand + +v_pk_mul_f32 v[8:9], v[0:1], s[0:1] op_sel:[0,1] op_sel_hi:[0,1] +// GFX12-ERR: :[[@LINE-1]]:37: error: invalid op_sel operand + +v_pk_add_f32 v[8:9], s[0:1], v[0:1] +// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand + +v_pk_add_f32 v[8:9], v[0:1], s[0:1] +// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand + +v_pk_add_f32 v[8:9], s[0:1], v[0:1] op_sel:[1,0] op_sel_hi:[0,0] +// GFX12-ERR: :[[@LINE-1]]:37: error: invalid op_sel operand + +v_pk_add_f32 v[8:9], v[0:1], s[0:1] op_sel:[0,1] op_sel_hi:[0,0] +// GFX12-ERR: :[[@LINE-1]]:37: error: invalid op_sel operand + +v_pk_add_f32 v[8:9], v[0:1], s[0:1] op_sel:[0,1] op_sel_hi:[0,1] +// GFX12-ERR: :[[@LINE-1]]:37: error: invalid op_sel operand + +v_pk_fma_f32 v[8:9], exec, v[0:1], v[4:5] +// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand + 
+v_pk_fma_f32 v[8:9], v[0:1], exec, v[4:5] +// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand + +v_pk_fma_f32 v[8:9], v[0:1], v[4:5], exec +// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand + +v_pk_mul_f32 v[8:9], exec, v[0:1] +// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand + +v_pk_mul_f32 v[8:9], v[0:1], exec +// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand + +v_pk_add_f32 v[8:9], exec, v[0:1] +// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand + +v_pk_add_f32 v[8:9], v[0:1], exec +// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand diff --git a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp index d6b578a..b7a060b 100644 --- a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp +++ b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp @@ -23,6 +23,7 @@ #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "gmock/gmock.h" #include "gtest/gtest.h" +#include <cstdlib> #include <optional> using namespace llvm; @@ -5360,6 +5361,144 @@ TEST_F(OpenMPIRBuilderTest, CreateReductions) { EXPECT_TRUE(findGEPZeroOne(ReductionFn->getArg(1), FirstRHS, SecondRHS)); } +static void createScan(llvm::Value *scanVar, llvm::Type *scanType, + OpenMPIRBuilder &OMPBuilder, IRBuilder<> &Builder, + OpenMPIRBuilder::LocationDescription Loc, + OpenMPIRBuilder::InsertPointTy &allocaIP, + ScanInfo *&ScanRedInfo) { + using InsertPointTy = OpenMPIRBuilder::InsertPointTy; + ASSERT_EXPECTED_INIT(InsertPointTy, retIp, + OMPBuilder.createScan(Loc, allocaIP, {scanVar}, + {scanType}, true, ScanRedInfo)); + Builder.restoreIP(retIp); +} +/* + Following is the pseudocode of the code generated by the test case + <declare pointer to buffer> ptr + size num_iters = 100 + // temp buffer allocation + omp masked { + buff = malloc(num_iters*scanvarstype) + *ptr = buff + } + barrier; + // input phase loop + for (i: 0..<num_iters>) { + <input phase>; + buffer = *ptr; + buffer[i] = red; + } + // scan reduction + omp masked + { + for (int k = 0; k != ceil(log2(num_iters)); ++k) { + i=pow(2,k) + for (size cnt = last_iter; cnt >= i; --cnt) { + buffer = *ptr; + buffer[cnt] op= buffer[cnt-i]; + } + } + } + barrier; + // scan phase loop + for (0..<num_iters>) { + buffer = *ptr; + red = buffer[i] ; + <scan phase>; + } + // temp buffer deletion + omp masked { + free(*ptr) + } + barrier; +*/ +TEST_F(OpenMPIRBuilderTest, ScanReduction) { + using InsertPointTy = OpenMPIRBuilder::InsertPointTy; + OpenMPIRBuilder OMPBuilder(*M); + OMPBuilder.initialize(); + IRBuilder<> Builder(BB); + OpenMPIRBuilder::LocationDescription Loc({Builder.saveIP(), DL}); + Value *TripCount = F->getArg(0); + Type *LCTy = TripCount->getType(); + Value *StartVal = ConstantInt::get(LCTy, 1); + Value *StopVal = ConstantInt::get(LCTy, 100); + Value *Step = ConstantInt::get(LCTy, 1); + auto AllocaIP = Builder.saveIP(); + + llvm::Value *ScanVar = Builder.CreateAlloca(Builder.getFloatTy()); + llvm::Value *OrigVar = Builder.CreateAlloca(Builder.getFloatTy()); + unsigned NumBodiesGenerated = 0; + ScanInfo *ScanRedInfo; + ASSERT_EXPECTED_INIT(ScanInfo *, ScanInformation, + OMPBuilder.scanInfoInitialize()); + ScanRedInfo = ScanInformation; + auto LoopBodyGenCB = [&](InsertPointTy CodeGenIP, llvm::Value *LC) { + NumBodiesGenerated += 1; + Builder.restoreIP(CodeGenIP); + createScan(ScanVar, Builder.getFloatTy(), OMPBuilder, Builder, Loc, + AllocaIP, ScanRedInfo); + return Error::success(); + }; + llvm::SmallVector<CanonicalLoopInfo *> loops; + 
ASSERT_EXPECTED_INIT(llvm::SmallVector<CanonicalLoopInfo *>, loopvec, + OMPBuilder.createCanonicalScanLoops( + Loc, LoopBodyGenCB, StartVal, StopVal, Step, false, + false, Builder.saveIP(), "scan", ScanRedInfo)); + loops = loopvec; + CanonicalLoopInfo *InputLoop = loops.front(); + CanonicalLoopInfo *ScanLoop = loops.back(); + Builder.restoreIP(ScanLoop->getAfterIP()); + InputLoop->assertOK(); + ScanLoop->assertOK(); + + EXPECT_EQ(ScanLoop->getAfter(), Builder.GetInsertBlock()); + EXPECT_EQ(NumBodiesGenerated, 2U); + SmallVector<OpenMPIRBuilder::ReductionInfo> ReductionInfos = { + {Builder.getFloatTy(), OrigVar, ScanVar, + /*EvaluationKind=*/OpenMPIRBuilder::EvalKind::Scalar, sumReduction, + /*ReductionGenClang=*/nullptr, sumAtomicReduction}}; + OpenMPIRBuilder::LocationDescription RedLoc({InputLoop->getAfterIP(), DL}); + llvm::BasicBlock *Cont = splitBB(Builder, false, "omp.scan.loop.cont"); + ASSERT_EXPECTED_INIT( + InsertPointTy, retIp, + OMPBuilder.emitScanReduction(RedLoc, ReductionInfos, ScanRedInfo)); + Builder.restoreIP(retIp); + Builder.CreateBr(Cont); + Builder.SetInsertPoint(Cont); + unsigned NumMallocs = 0; + unsigned NumFrees = 0; + unsigned NumMasked = 0; + unsigned NumEndMasked = 0; + unsigned NumLog = 0; + unsigned NumCeil = 0; + for (Instruction &I : instructions(F)) { + if (!isa<CallInst>(I)) + continue; + CallInst *Call = dyn_cast<CallInst>(&I); + StringRef Name = Call->getCalledFunction()->getName(); + if (Name.equals_insensitive("malloc")) { + NumMallocs += 1; + } else if (Name.equals_insensitive("free")) { + NumFrees += 1; + } else if (Name.equals_insensitive("__kmpc_masked")) { + NumMasked += 1; + } else if (Name.equals_insensitive("__kmpc_end_masked")) { + NumEndMasked += 1; + } else if (Name.equals_insensitive("llvm.log2.f64")) { + NumLog += 1; + } else if (Name.equals_insensitive("llvm.ceil.f64")) { + NumCeil += 1; + } + } + EXPECT_EQ(NumBodiesGenerated, 2U); + EXPECT_EQ(NumMasked, 3U); + EXPECT_EQ(NumEndMasked, 3U); + EXPECT_EQ(NumMallocs, 1U); + EXPECT_EQ(NumFrees, 1U); + EXPECT_EQ(NumLog, 1U); + EXPECT_EQ(NumCeil, 1U); +} + TEST_F(OpenMPIRBuilderTest, CreateTwoReductions) { using InsertPointTy = OpenMPIRBuilder::InsertPointTy; OpenMPIRBuilder OMPBuilder(*M); diff --git a/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td b/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td index 92aacda..2c64693 100644 --- a/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td +++ b/mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td @@ -907,7 +907,8 @@ def AMDGPU_GatherToLDSOp : The elements gathered by the subgroup will be written contiguously in order of lane ID starting at `$dst[$dstIndices]`. Byte-sized (ex. i8) or short-sized (ex. i16) types will be zero-padded/extended to 32 bits before being written. 96-bit types - (ex. vector<3xf32>) will be zero-padded to 128 bits before being written. + (ex. vector<3xf32>) will be zero-padded to 128 bits before being written. Only the + offsets held by lane 0 are used. * `$transferType`: type of the data to be transferred by each thread. This is used to determine the size of the data to be transferred and the number of threads in the subgroup. The transfer type must be a scalar type or a vector type with a single element type. 
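For reference, a minimal well-formed use of the op under the semantics documented above. The function and operand names here are illustrative only; the shapes mirror the existing cases in mlir/test/Dialect/AMDGPU/ops.mlir rather than anything added by this patch.

// Each lane supplies a vector<2xf16> transfer; results are written
// contiguously into LDS in lane-ID order starting at %lds[%idx], and per
// the note above only the offsets held by lane 0 are used.
func.func @gather_to_lds_sketch(%idx : index, %src : memref<32xf16>,
                                %lds : memref<32xf16, #gpu.address_space<workgroup>>) {
  amdgpu.gather_to_lds %src[%idx], %lds[%idx]
      : vector<2xf16>, memref<32xf16>, memref<32xf16, #gpu.address_space<workgroup>>
  func.return
}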
diff --git a/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp b/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp index 9a0a230..d7ffdcb 100644 --- a/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp +++ b/mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp @@ -518,8 +518,8 @@ LogicalResult GatherToLDSOp::verify() { MemRefType srcType = cast<MemRefType>(getSrc().getType()); MemRefType dstType = cast<MemRefType>(getDst().getType()); - if (!dstType.areTrailingDimsContiguous(dstType.getRank())) - return emitOpError("destination types must be contiguous"); + if (!dstType.areTrailingDimsContiguous(1)) + return emitOpError("destination type innermost dim must be contiguous"); auto elemType = srcType.getElementType(); // Check $src and $dst element types are the same. diff --git a/mlir/test/Dialect/AMDGPU/invalid.mlir b/mlir/test/Dialect/AMDGPU/invalid.mlir index 0d2fd24..66e7dd4 100644 --- a/mlir/test/Dialect/AMDGPU/invalid.mlir +++ b/mlir/test/Dialect/AMDGPU/invalid.mlir @@ -230,3 +230,11 @@ func.func @gather_to_lds_non_lds(%idx1 : index, %mem1 : memref<32xf16>, %mem2 : amdgpu.gather_to_lds %mem1[%idx1], %mem2[%idx1] : vector<2xf16>, memref<32xf16>, memref<32xf16> func.return } + +// ----- + +func.func @gather_to_lds_non_contiguous(%idx1 : index, %mem1 : memref<32xf16>, %mem2 : memref<32xf16, strided<[?]>, #gpu.address_space<workgroup>>) { + // expected-error@+1 {{'amdgpu.gather_to_lds' op destination type innermost dim must be contiguous}} + amdgpu.gather_to_lds %mem1[%idx1], %mem2[%idx1] : vector<2xf16>, memref<32xf16>, memref<32xf16, strided<[?]>, #gpu.address_space<workgroup>> + func.return +} diff --git a/mlir/test/Dialect/AMDGPU/ops.mlir b/mlir/test/Dialect/AMDGPU/ops.mlir index fe78b53..87e11c0 100644 --- a/mlir/test/Dialect/AMDGPU/ops.mlir +++ b/mlir/test/Dialect/AMDGPU/ops.mlir @@ -539,13 +539,15 @@ func.func @transpose_load(%idx1 : index, %idx2 : index, %mem : memref<128x32xf16 } // CHECK-LABEL: func @gather_to_lds -func.func @gather_to_lds(%idx1 : index, %idx2 : index, %mem1 : memref<32xf16>, %mem2 : memref<32x32xf16>, %smem1 : memref<32xf16, #gpu.address_space<workgroup>>, %smem2 : memref<32x32xf16, #gpu.address_space<workgroup>>) { +func.func @gather_to_lds(%idx1 : index, %idx2 : index, %mem1 : memref<32xf16>, %mem2 : memref<32x32xf16>, %smem1 : memref<32xf16, #gpu.address_space<workgroup>>, %smem2 : memref<32x32xf16, #gpu.address_space<workgroup>>, %smem3 : memref<?x?xf16, strided<[?, 1]>, #gpu.address_space<workgroup>>) { // CHECK: amdgpu.gather_to_lds %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}}[%{{.*}}, %{{.*}}] // CHECK: amdgpu.gather_to_lds %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}}[%{{.*}}] // CHECK: amdgpu.gather_to_lds %{{.*}}[%{{.*}}], %{{.*}}[%{{.*}}, %{{.*}}] + // CHECK: amdgpu.gather_to_lds %{{.*}}[%{{.*}}], %{{.*}}[%{{.*}}, %{{.*}}] amdgpu.gather_to_lds %mem2[%idx1, %idx2], %smem2[%idx1, %idx2] : vector<2xf16>, memref<32x32xf16>, memref<32x32xf16, #gpu.address_space<workgroup>> amdgpu.gather_to_lds %mem2[%idx1, %idx2], %smem1[%idx1] : vector<2xf16>, memref<32x32xf16>, memref<32xf16, #gpu.address_space<workgroup>> amdgpu.gather_to_lds %mem1[%idx1], %smem2[%idx1, %idx2] : vector<2xf16>, memref<32xf16>, memref<32x32xf16, #gpu.address_space<workgroup>> + amdgpu.gather_to_lds %mem1[%idx1], %smem3[%idx1, %idx2] : vector<2xf16>, memref<32xf16>, memref<?x?xf16, strided<[?, 1]>, #gpu.address_space<workgroup>> func.return } diff --git a/offload/unittests/CMakeLists.txt b/offload/unittests/CMakeLists.txt index a0d5c01..308849a 100644 --- a/offload/unittests/CMakeLists.txt +++ b/offload/unittests/CMakeLists.txt @@ -18,6
+18,9 @@ endif () set(OFFLOAD_UNITTESTS_DIR ${CMAKE_CURRENT_SOURCE_DIR}) function(add_offload_test_device_code test_filename test_name) + cmake_parse_arguments( + "OFFLOAD_TESTS" "WITH_DEVICE_MATH_LIBS" "" "" ${ARGN}) + set(SRC_PATH ${CMAKE_CURRENT_SOURCE_DIR}/${test_filename}) set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) @@ -37,13 +40,25 @@ function(add_offload_test_device_code test_filename test_name) endif() if(nvptx_arch AND CUDAToolkit_FOUND) + set(nvptx_compile_flags ${OFFLOAD_TESTS_UNPARSED_ARGUMENTS}) + + if(OFFLOAD_TESTS_WITH_DEVICE_MATH_LIBS) + file(GLOB libdevice_paths "${CUDAToolkit_LIBRARY_ROOT}/nvvm/libdevice/libdevice.*.bc") + if(libdevice_paths) + list(GET libdevice_paths 0 libdevice_path) + list(APPEND nvptx_compile_flags "-Xclang" "-mlink-builtin-bitcode") + list(APPEND nvptx_compile_flags "-Xclang" "${libdevice_path}") + list(APPEND nvptx_compile_flags "-DCUDA_MATH_FOUND=1") + endif() + endif() + set(output_file "${CMAKE_CURRENT_BINARY_DIR}/${test_name}.nvptx64.bin") add_custom_command( OUTPUT ${output_file} COMMAND ${CMAKE_CXX_COMPILER} -I${OFFLOAD_UNITTESTS_DIR} --target=nvptx64-nvidia-cuda -march=${nvptx_arch} - -nogpulib --cuda-path=${cuda_path} -flto ${ARGN} + -nogpulib --cuda-path=${cuda_path} -flto ${nvptx_compile_flags} ${SRC_PATH} -o ${output_file} DEPENDS ${SRC_PATH} ) @@ -62,13 +77,25 @@ function(add_offload_test_device_code test_filename test_name) endif() if(amdgpu_arch) + set(amdgpu_compile_flags ${OFFLOAD_TESTS_UNPARSED_ARGUMENTS}) + + if(OFFLOAD_TESTS_WITH_DEVICE_MATH_LIBS) + find_package(AMDDeviceLibs QUIET HINTS ${CMAKE_INSTALL_PREFIX} PATHS /opt/rocm) + if(AMDDeviceLibs_FOUND) + get_target_property(ocml_path ocml IMPORTED_LOCATION) + list(APPEND amdgpu_compile_flags "-Xclang" "-mlink-builtin-bitcode") + list(APPEND amdgpu_compile_flags "-Xclang" "${ocml_path}") + list(APPEND amdgpu_compile_flags "-DHIP_MATH_FOUND=1") + endif() + endif() + set(output_file "${CMAKE_CURRENT_BINARY_DIR}/${test_name}.amdgpu.bin") add_custom_command( OUTPUT ${output_file} COMMAND ${CMAKE_CXX_COMPILER} -I${OFFLOAD_UNITTESTS_DIR} --target=amdgcn-amd-amdhsa -mcpu=${amdgpu_arch} - -nogpulib -flto ${ARGN} ${SRC_PATH} -o ${output_file} + -nogpulib -flto ${amdgpu_compile_flags} ${SRC_PATH} -o ${output_file} DEPENDS ${SRC_PATH} ) add_custom_target(${test_name}.amdgpu DEPENDS ${output_file}) diff --git a/offload/unittests/Conformance/device_code/CMakeLists.txt b/offload/unittests/Conformance/device_code/CMakeLists.txt index 789dd16..a0c5369 100644 --- a/offload/unittests/Conformance/device_code/CMakeLists.txt +++ b/offload/unittests/Conformance/device_code/CMakeLists.txt @@ -1,4 +1,10 @@ +add_offload_test_device_code(CUDAMath.cpp cuda-math WITH_DEVICE_MATH_LIBS -O3 -stdlib -fno-builtin) +add_offload_test_device_code(HIPMath.cpp hip-math WITH_DEVICE_MATH_LIBS -O3 -stdlib -fno-builtin) add_offload_test_device_code(LLVMLibm.cpp llvm-libm -O3 -stdlib -fno-builtin) -add_custom_target(conformance_device_binaries DEPENDS llvm-libm.bin) +add_custom_target(conformance_device_binaries DEPENDS + cuda-math.bin + hip-math.bin + llvm-libm.bin +) set(OFFLOAD_CONFORMANCE_DEVICE_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR} PARENT_SCOPE) diff --git a/offload/unittests/Conformance/device_code/CUDAMath.cpp b/offload/unittests/Conformance/device_code/CUDAMath.cpp new file mode 100644 index 0000000..a351e92 --- /dev/null +++ b/offload/unittests/Conformance/device_code/CUDAMath.cpp @@ -0,0 +1,178 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM 
Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file contains the implementation of the device kernels that wrap the +/// math functions from the cuda-math provider. +/// +//===----------------------------------------------------------------------===// + +#ifdef CUDA_MATH_FOUND + +#include "Conformance/device_code/DeviceAPIs.hpp" +#include "Conformance/device_code/KernelRunner.hpp" + +#include <gpuintrin.h> +#include <stddef.h> + +using namespace kernels; + +//===----------------------------------------------------------------------===// +// Helpers +//===----------------------------------------------------------------------===// + +static inline float sincosfSin(float X) { + float SinX, CosX; + __nv_sincosf(X, &SinX, &CosX); + return SinX; +} + +static inline float sincosfCos(float X) { + float SinX, CosX; + __nv_sincosf(X, &SinX, &CosX); + return CosX; +} + +//===----------------------------------------------------------------------===// +// Kernels +//===----------------------------------------------------------------------===// + +extern "C" { + +__gpu_kernel void acosfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_acosf>(NumElements, Out, X); +} + +__gpu_kernel void acoshfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_acoshf>(NumElements, Out, X); +} + +__gpu_kernel void asinfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_asinf>(NumElements, Out, X); +} + +__gpu_kernel void asinhfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_asinhf>(NumElements, Out, X); +} + +__gpu_kernel void atanfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_atanf>(NumElements, Out, X); +} + +__gpu_kernel void atanhfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_atanhf>(NumElements, Out, X); +} + +__gpu_kernel void cbrtfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_cbrtf>(NumElements, Out, X); +} + +__gpu_kernel void cosfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_cosf>(NumElements, Out, X); +} + +__gpu_kernel void coshfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_coshf>(NumElements, Out, X); +} + +__gpu_kernel void cospifKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_cospif>(NumElements, Out, X); +} + +__gpu_kernel void erffKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_erff>(NumElements, Out, X); +} + +__gpu_kernel void expfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_expf>(NumElements, Out, X); +} + +__gpu_kernel void exp10fKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_exp10f>(NumElements, Out, X); +} + +__gpu_kernel void exp2fKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_exp2f>(NumElements, Out, X); +} + +__gpu_kernel void expm1fKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_expm1f>(NumElements, Out, X); +} + +__gpu_kernel void 
logfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_logf>(NumElements, Out, X); +} + +__gpu_kernel void log10fKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_log10f>(NumElements, Out, X); +} + +__gpu_kernel void log1pfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_log1pf>(NumElements, Out, X); +} + +__gpu_kernel void log2fKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_log2f>(NumElements, Out, X); +} + +__gpu_kernel void sinfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_sinf>(NumElements, Out, X); +} + +__gpu_kernel void sincosfSinKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<sincosfSin>(NumElements, Out, X); +} + +__gpu_kernel void sincosfCosKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<sincosfCos>(NumElements, Out, X); +} + +__gpu_kernel void sinhfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_sinhf>(NumElements, Out, X); +} + +__gpu_kernel void sinpifKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_sinpif>(NumElements, Out, X); +} + +__gpu_kernel void tanfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_tanf>(NumElements, Out, X); +} + +__gpu_kernel void tanhfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__nv_tanhf>(NumElements, Out, X); +} +} // extern "C" + +#endif // CUDA_MATH_FOUND diff --git a/offload/unittests/Conformance/device_code/DeviceAPIs.hpp b/offload/unittests/Conformance/device_code/DeviceAPIs.hpp new file mode 100644 index 0000000..8476dcb --- /dev/null +++ b/offload/unittests/Conformance/device_code/DeviceAPIs.hpp @@ -0,0 +1,113 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file contains platform-specific definitions and forward declarations +/// for device-side APIs used by the kernels. +/// +//===----------------------------------------------------------------------===// + +#ifndef CONFORMANCE_DEVICE_CODE_DEVICEAPIS_HPP +#define CONFORMANCE_DEVICE_CODE_DEVICEAPIS_HPP + +#include <stdint.h> + +typedef _Float16 float16; + +#ifdef __AMDGPU__ + +// The ROCm device library uses control globals to alter codegen for the +// different targets. To avoid needing to link them in manually, we simply +// define them here. +extern "C" { +extern const inline uint8_t __oclc_unsafe_math_opt = 0; +extern const inline uint8_t __oclc_daz_opt = 0; +extern const inline uint8_t __oclc_correctly_rounded_sqrt32 = 1; +extern const inline uint8_t __oclc_finite_only_opt = 0; +extern const inline uint32_t __oclc_ISA_version = 9000; +} + +// These aliases cause Clang to emit the control constants with ODR linkage. +// This allows us to link against the symbols without preventing them from being +// optimized out or causing symbol collisions. 
+[[gnu::alias("__oclc_unsafe_math_opt")]] const uint8_t __oclc_unsafe_math_opt__; +[[gnu::alias("__oclc_daz_opt")]] const uint8_t __oclc_daz_opt__; +[[gnu::alias("__oclc_correctly_rounded_sqrt32")]] const uint8_t + __oclc_correctly_rounded_sqrt32__; +[[gnu::alias("__oclc_finite_only_opt")]] const uint8_t __oclc_finite_only_opt__; +[[gnu::alias("__oclc_ISA_version")]] const uint32_t __oclc_ISA_version__; + +#endif // __AMDGPU__ + +#ifdef CUDA_MATH_FOUND + +extern "C" { + +float __nv_acosf(float); +float __nv_acoshf(float); +float __nv_asinf(float); +float __nv_asinhf(float); +float __nv_atanf(float); +float __nv_atanhf(float); +float __nv_cbrtf(float); +float __nv_cosf(float); +float __nv_coshf(float); +float __nv_cospif(float); +float __nv_erff(float); +float __nv_expf(float); +float __nv_exp10f(float); +float __nv_exp2f(float); +float __nv_expm1f(float); +float __nv_logf(float); +float __nv_log10f(float); +float __nv_log1pf(float); +float __nv_log2f(float); +float __nv_sinf(float); +void __nv_sincosf(float, float *, float *); +float __nv_sinhf(float); +float __nv_sinpif(float); +float __nv_tanf(float); +float __nv_tanhf(float); +} // extern "C" + +#endif // CUDA_MATH_FOUND + +#ifdef HIP_MATH_FOUND + +extern "C" { + +float __ocml_acos_f32(float); +float __ocml_acosh_f32(float); +float __ocml_asin_f32(float); +float __ocml_asinh_f32(float); +float __ocml_atan_f32(float); +float __ocml_atanh_f32(float); +float __ocml_cbrt_f32(float); +float __ocml_cos_f32(float); +float __ocml_cosh_f32(float); +float __ocml_cospi_f32(float); +float __ocml_erf_f32(float); +float __ocml_exp_f32(float); +float __ocml_exp10_f32(float); +float __ocml_exp2_f32(float); +float __ocml_expm1_f32(float); +float __ocml_log_f32(float); +float __ocml_log10_f32(float); +float __ocml_log1p_f32(float); +float __ocml_log2_f32(float); +float __ocml_sin_f32(float); +float __ocml_sincos_f32(float, float *); +float __ocml_sinh_f32(float); +float __ocml_sinpi_f32(float); +float __ocml_tan_f32(float); +float __ocml_tanh_f32(float); +} // extern "C" + +#endif // HIP_MATH_FOUND + +#endif // CONFORMANCE_DEVICE_CODE_DEVICEAPIS_HPP diff --git a/offload/unittests/Conformance/device_code/HIPMath.cpp b/offload/unittests/Conformance/device_code/HIPMath.cpp new file mode 100644 index 0000000..36efe6b --- /dev/null +++ b/offload/unittests/Conformance/device_code/HIPMath.cpp @@ -0,0 +1,178 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file contains the implementation of the device kernels that wrap the +/// math functions from the hip-math provider. 
+/// +//===----------------------------------------------------------------------===// + +#ifdef HIP_MATH_FOUND + +#include "Conformance/device_code/DeviceAPIs.hpp" +#include "Conformance/device_code/KernelRunner.hpp" + +#include <gpuintrin.h> +#include <stddef.h> + +using namespace kernels; + +//===----------------------------------------------------------------------===// +// Helpers +//===----------------------------------------------------------------------===// + +static inline float sincosfSin(float X) { + float CosX; + float SinX = __ocml_sincos_f32(X, &CosX); + return SinX; +} + +static inline float sincosfCos(float X) { + float CosX; + float SinX = __ocml_sincos_f32(X, &CosX); + return CosX; +} + +//===----------------------------------------------------------------------===// +// Kernels +//===----------------------------------------------------------------------===// + +extern "C" { + +__gpu_kernel void acosfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__ocml_acos_f32>(NumElements, Out, X); +} + +__gpu_kernel void acoshfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__ocml_acosh_f32>(NumElements, Out, X); +} + +__gpu_kernel void asinfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__ocml_asin_f32>(NumElements, Out, X); +} + +__gpu_kernel void asinhfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__ocml_asinh_f32>(NumElements, Out, X); +} + +__gpu_kernel void atanfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__ocml_atan_f32>(NumElements, Out, X); +} + +__gpu_kernel void atanhfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__ocml_atanh_f32>(NumElements, Out, X); +} + +__gpu_kernel void cbrtfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__ocml_cbrt_f32>(NumElements, Out, X); +} + +__gpu_kernel void cosfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__ocml_cos_f32>(NumElements, Out, X); +} + +__gpu_kernel void coshfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__ocml_cosh_f32>(NumElements, Out, X); +} + +__gpu_kernel void cospifKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__ocml_cospi_f32>(NumElements, Out, X); +} + +__gpu_kernel void erffKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__ocml_erf_f32>(NumElements, Out, X); +} + +__gpu_kernel void expfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__ocml_exp_f32>(NumElements, Out, X); +} + +__gpu_kernel void exp10fKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__ocml_exp10_f32>(NumElements, Out, X); +} + +__gpu_kernel void exp2fKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__ocml_exp2_f32>(NumElements, Out, X); +} + +__gpu_kernel void expm1fKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__ocml_expm1_f32>(NumElements, Out, X); +} + +__gpu_kernel void logfKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__ocml_log_f32>(NumElements, Out, X); +} + +__gpu_kernel void log10fKernel(const float *X, float *Out, + size_t NumElements) noexcept { + runKernelBody<__ocml_log10_f32>(NumElements, Out, X); +} + +__gpu_kernel void log1pfKernel(const float *X, float 
diff --git a/offload/unittests/Conformance/device_code/Common.hpp b/offload/unittests/Conformance/device_code/KernelRunner.hpp
index bcf3ac6..e64a62f 100644
--- a/offload/unittests/Conformance/device_code/Common.hpp
+++ b/offload/unittests/Conformance/device_code/KernelRunner.hpp
@@ -7,21 +7,19 @@
 //===----------------------------------------------------------------------===//
 ///
 /// \file
-/// This file contains common utilities for defining device kernel wrappers to
-/// math functions.
+/// This file contains the definition of runKernelBody, a template helper
+/// that executes the per-thread logic of a math function's kernel wrapper.
 ///
 //===----------------------------------------------------------------------===//
 
-#ifndef CONFORMANCE_DEVICE_CODE_COMMON_HPP
-#define CONFORMANCE_DEVICE_CODE_COMMON_HPP
+#ifndef CONFORMANCE_DEVICE_CODE_KERNELRUNNER_HPP
+#define CONFORMANCE_DEVICE_CODE_KERNELRUNNER_HPP
 
 #include <gpuintrin.h>
 #include <stddef.h>
 #include <stdint.h>
 
-namespace common {
-
-typedef _Float16 float16;
+namespace kernels {
 
 template <auto Func, typename OutType, typename... InTypes>
 void runKernelBody(size_t NumElements, OutType *Out, const InTypes *...Ins) {
@@ -32,6 +30,6 @@ void runKernelBody(size_t NumElements, OutType *Out, const InTypes *...Ins) {
     Out[Index] = Func(Ins[Index]...);
   }
 }
-} // namespace common
+} // namespace kernels
 
-#endif // CONFORMANCE_DEVICE_CODE_COMMON_HPP
+#endif // CONFORMANCE_DEVICE_CODE_KERNELRUNNER_HPP
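Because runKernelBody is variadic over its input buffers, a wrapper for a multi-argument function needs no extra machinery: each additional input is just one more pointer parameter. A sketch under the assumption that a two-argument OCML entry point such as __ocml_pow_f32 were declared in DeviceAPIs.hpp (it is not part of this patch):

    extern "C" float __ocml_pow_f32(float, float); // hypothetical declaration

    __gpu_kernel void powfKernel(const float *X, const float *Y, float *Out,
                                 size_t NumElements) noexcept {
      runKernelBody<__ocml_pow_f32>(NumElements, Out, X, Y);
    }

Passing the callee as a non-type template parameter (template <auto Func>) also means the call target is fixed at compile time, so no function-pointer indirection survives into the device code.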
diff --git a/offload/unittests/Conformance/device_code/LLVMLibm.cpp b/offload/unittests/Conformance/device_code/LLVMLibm.cpp
index f137ba3..8869d87 100644
--- a/offload/unittests/Conformance/device_code/LLVMLibm.cpp
+++ b/offload/unittests/Conformance/device_code/LLVMLibm.cpp
@@ -12,13 +12,14 @@
 ///
 //===----------------------------------------------------------------------===//
 
-#include "Conformance/device_code/Common.hpp"
+#include "Conformance/device_code/DeviceAPIs.hpp"
+#include "Conformance/device_code/KernelRunner.hpp"
 
 #include <gpuintrin.h>
 #include <math.h>
 #include <stddef.h>
 
-using namespace common;
+using namespace kernels;
 
 //===----------------------------------------------------------------------===//
 // Helpers
diff --git a/offload/unittests/Conformance/include/mathtest/TestRunner.hpp b/offload/unittests/Conformance/include/mathtest/TestRunner.hpp
index f89d151..ab17f1d 100644
--- a/offload/unittests/Conformance/include/mathtest/TestRunner.hpp
+++ b/offload/unittests/Conformance/include/mathtest/TestRunner.hpp
@@ -41,11 +41,11 @@ void printPreamble(const TestConfig &Config, size_t Index,
                    size_t Total) noexcept {
   using FunctionConfig = FunctionConfig<Func>;
 
-  llvm::outs() << "[" << (Index + 1) << "/" << Total << "] "
+  llvm::errs() << "[" << (Index + 1) << "/" << Total << "] "
                << "Running conformance test '" << FunctionConfig::Name
                << "' with '" << Config.Provider << "' on '" << Config.Platform
                << "'\n";
-  llvm::outs().flush();
+  llvm::errs().flush();
 }
 
 template <typename T>
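The printPreamble change above reroutes per-test progress lines from stdout to stderr, which keeps stdout free for output that callers may want to capture or redirect. The pattern in isolation (printStatus is a hypothetical free function, not part of this patch):

    #include "llvm/Support/raw_ostream.h"

    #include <cstddef>

    // Status text goes to stderr; llvm::errs() is unbuffered, so the explicit
    // flush mirrors the original code rather than being strictly required.
    static void printStatus(size_t Index, size_t Total) {
      llvm::errs() << "[" << (Index + 1) << "/" << Total << "] running...\n";
      llvm::errs().flush();
    }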