Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPU.h                         3
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPU.td                       13
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUInsertSingleUseVDST.cpp  245
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp         10
-rw-r--r--  llvm/lib/Target/AMDGPU/CMakeLists.txt                   1
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNSubtarget.h                   3
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.td                   2
-rw-r--r--  llvm/lib/Target/AMDGPU/SOPInstructions.td              11
-rw-r--r--  llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp        18
-rw-r--r--  llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h           2
-rw-r--r--  llvm/lib/Target/AMDGPU/VOP1Instructions.td             18
-rw-r--r--  llvm/lib/Target/AMDGPU/VOP2Instructions.td              6
-rw-r--r--  llvm/lib/Target/AMDGPU/VOP3Instructions.td             35
-rw-r--r--  llvm/lib/Target/AMDGPU/VOP3PInstructions.td            12
-rw-r--r--  llvm/lib/Target/AMDGPU/VOPCInstructions.td             13
-rw-r--r--  llvm/lib/Target/AMDGPU/VOPInstructions.td              20
16 files changed, 33 insertions, 379 deletions
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.h b/llvm/lib/Target/AMDGPU/AMDGPU.h
index b2dd354..4abb5a6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.h
@@ -405,9 +405,6 @@ extern char &SIModeRegisterID;
void initializeAMDGPUInsertDelayAluPass(PassRegistry &);
extern char &AMDGPUInsertDelayAluID;
-void initializeAMDGPUInsertSingleUseVDSTPass(PassRegistry &);
-extern char &AMDGPUInsertSingleUseVDSTID;
-
void initializeSIInsertHardClausesPass(PassRegistry &);
extern char &SIInsertHardClausesID;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index 919e698..3626fd8 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -929,12 +929,6 @@ def FeatureSALUFloatInsts : SubtargetFeature<"salu-float",
"Has SALU floating point instructions"
>;
-def FeatureVGPRSingleUseHintInsts : SubtargetFeature<"vgpr-singleuse-hint",
- "HasVGPRSingleUseHintInsts",
- "true",
- "Has single-use VGPR hint instructions"
->;
-
def FeaturePseudoScalarTrans : SubtargetFeature<"pseudo-scalar-trans",
"HasPseudoScalarTrans",
"true",
@@ -1615,14 +1609,12 @@ def FeatureISAVersion11_5_0 : FeatureSet<
!listconcat(FeatureISAVersion11_Common.Features,
[FeatureSALUFloatInsts,
FeatureDPPSrc1SGPR,
- FeatureVGPRSingleUseHintInsts,
FeatureRequiredExportPriority])>;
def FeatureISAVersion11_5_1 : FeatureSet<
!listconcat(FeatureISAVersion11_Common.Features,
[FeatureSALUFloatInsts,
FeatureDPPSrc1SGPR,
- FeatureVGPRSingleUseHintInsts,
Feature1_5xVGPRs,
FeatureRequiredExportPriority])>;
@@ -1630,7 +1622,6 @@ def FeatureISAVersion11_5_2 : FeatureSet<
!listconcat(FeatureISAVersion11_Common.Features,
[FeatureSALUFloatInsts,
FeatureDPPSrc1SGPR,
- FeatureVGPRSingleUseHintInsts,
FeatureRequiredExportPriority])>;
def FeatureISAVersion12 : FeatureSet<
@@ -1663,7 +1654,6 @@ def FeatureISAVersion12 : FeatureSet<
FeatureSALUFloatInsts,
FeaturePseudoScalarTrans,
FeatureHasRestrictedSOffset,
- FeatureVGPRSingleUseHintInsts,
FeatureScalarDwordx3Loads,
FeatureDPPSrc1SGPR,
FeatureMaxHardClauseLength32,
@@ -2271,9 +2261,6 @@ def HasNotMADIntraFwdBug : Predicate<"!Subtarget->hasMADIntraFwdBug()">;
def HasSALUFloatInsts : Predicate<"Subtarget->hasSALUFloatInsts()">,
AssemblerPredicate<(all_of FeatureSALUFloatInsts)>;
-def HasVGPRSingleUseHintInsts : Predicate<"Subtarget->hasVGPRSingleUseHintInsts()">,
- AssemblerPredicate<(all_of FeatureVGPRSingleUseHintInsts)>;
-
def HasPseudoScalarTrans : Predicate<"Subtarget->hasPseudoScalarTrans()">,
AssemblerPredicate<(all_of FeaturePseudoScalarTrans)>;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInsertSingleUseVDST.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInsertSingleUseVDST.cpp
deleted file mode 100644
index 43b3bf4..0000000
--- a/llvm/lib/Target/AMDGPU/AMDGPUInsertSingleUseVDST.cpp
+++ /dev/null
@@ -1,245 +0,0 @@
-//===- AMDGPUInsertSingleUseVDST.cpp - Insert s_singleuse_vdst instructions ==//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// Insert s_singleuse_vdst instructions on GFX11.5+ to mark regions of VALU
-/// instructions that produce single-use VGPR values. If the value is forwarded
-/// to the consumer instruction prior to VGPR writeback, the hardware can
-/// then skip (kill) the VGPR write.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "AMDGPUGenSearchableTables.inc"
-#include "GCNSubtarget.h"
-#include "SIInstrInfo.h"
-#include "SIRegisterInfo.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineOperand.h"
-#include "llvm/CodeGen/Register.h"
-#include "llvm/IR/DebugLoc.h"
-#include "llvm/MC/MCRegister.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/Pass.h"
-#include <array>
-
-using namespace llvm;
-
-#define DEBUG_TYPE "amdgpu-insert-single-use-vdst"
-
-namespace {
-class AMDGPUInsertSingleUseVDST : public MachineFunctionPass {
-private:
- const SIInstrInfo *SII;
- class SingleUseInstruction {
- private:
- static const unsigned MaxSkipRange = 0b111;
- static const unsigned MaxNumberOfSkipRegions = 2;
-
- unsigned LastEncodedPositionEnd;
- MachineInstr *ProducerInstr;
-
- std::array<unsigned, MaxNumberOfSkipRegions + 1> SingleUseRegions;
- SmallVector<unsigned, MaxNumberOfSkipRegions> SkipRegions;
-
- // Adds a skip region into the instruction.
- void skip(const unsigned ProducerPosition) {
- while (LastEncodedPositionEnd + MaxSkipRange < ProducerPosition) {
- SkipRegions.push_back(MaxSkipRange);
- LastEncodedPositionEnd += MaxSkipRange;
- }
- SkipRegions.push_back(ProducerPosition - LastEncodedPositionEnd);
- LastEncodedPositionEnd = ProducerPosition;
- }
-
- bool currentRegionHasSpace() {
- const auto Region = SkipRegions.size();
- // The first region has an extra bit of encoding space.
- return SingleUseRegions[Region] <
- ((Region == MaxNumberOfSkipRegions) ? 0b1111U : 0b111U);
- }
-
- unsigned encodeImm() {
- // Handle the first Single Use Region separately as it has an extra bit
- // of encoding space.
- unsigned Imm = SingleUseRegions[SkipRegions.size()];
- unsigned ShiftAmount = 4;
- for (unsigned i = SkipRegions.size(); i > 0; i--) {
- Imm |= SkipRegions[i - 1] << ShiftAmount;
- ShiftAmount += 3;
- Imm |= SingleUseRegions[i - 1] << ShiftAmount;
- ShiftAmount += 3;
- }
- return Imm;
- }
-
- public:
- SingleUseInstruction(const unsigned ProducerPosition,
- MachineInstr *Producer)
- : LastEncodedPositionEnd(ProducerPosition + 1), ProducerInstr(Producer),
- SingleUseRegions({1, 0, 0}) {}
-
- // Returns false if adding a new single use producer failed. This happens
- // because it could not be encoded, either because there is no room to
- // encode another single use producer region or that this single use
- // producer is too far away to encode the amount of instructions to skip.
- bool tryAddProducer(const unsigned ProducerPosition, MachineInstr *MI) {
- // Producer is too far away to encode into this instruction or another
- // skip region is needed and SkipRegions.size() = 2 so there's no room for
- // another skip region, therefore a new instruction is needed.
- if (LastEncodedPositionEnd +
- (MaxSkipRange * (MaxNumberOfSkipRegions - SkipRegions.size())) <
- ProducerPosition)
- return false;
-
- // If a skip region is needed.
- if (LastEncodedPositionEnd != ProducerPosition ||
- !currentRegionHasSpace()) {
- // If the current region is out of space therefore a skip region would
- // be needed, but there is no room for another skip region.
- if (SkipRegions.size() == MaxNumberOfSkipRegions)
- return false;
- skip(ProducerPosition);
- }
-
- SingleUseRegions[SkipRegions.size()]++;
- LastEncodedPositionEnd = ProducerPosition + 1;
- ProducerInstr = MI;
- return true;
- }
-
- auto emit(const SIInstrInfo *SII) {
- return BuildMI(*ProducerInstr->getParent(), ProducerInstr, DebugLoc(),
- SII->get(AMDGPU::S_SINGLEUSE_VDST))
- .addImm(encodeImm());
- }
- };
-
-public:
- static char ID;
-
- AMDGPUInsertSingleUseVDST() : MachineFunctionPass(ID) {}
-
- void insertSingleUseInstructions(
- ArrayRef<std::pair<unsigned, MachineInstr *>> SingleUseProducers) const {
- SmallVector<SingleUseInstruction> Instructions;
-
- for (auto &[Position, MI] : SingleUseProducers) {
- // Encode this position into the last single use instruction if possible.
- if (Instructions.empty() ||
- !Instructions.back().tryAddProducer(Position, MI)) {
- // If not, add a new instruction.
- Instructions.push_back(SingleUseInstruction(Position, MI));
- }
- }
-
- for (auto &Instruction : Instructions)
- Instruction.emit(SII);
- }
-
- bool runOnMachineFunction(MachineFunction &MF) override {
- const auto &ST = MF.getSubtarget<GCNSubtarget>();
- if (!ST.hasVGPRSingleUseHintInsts())
- return false;
-
- SII = ST.getInstrInfo();
- const auto *TRI = &SII->getRegisterInfo();
- bool InstructionEmitted = false;
-
- for (MachineBasicBlock &MBB : MF) {
- DenseMap<MCRegUnit, unsigned> RegisterUseCount;
-
- // Handle boundaries at the end of basic block separately to avoid
- // false positives. If they are live at the end of a basic block then
- // assume it has more uses later on.
- for (const auto &Liveout : MBB.liveouts()) {
- for (MCRegUnitMaskIterator Units(Liveout.PhysReg, TRI); Units.isValid();
- ++Units) {
- const auto [Unit, Mask] = *Units;
- if ((Mask & Liveout.LaneMask).any())
- RegisterUseCount[Unit] = 2;
- }
- }
-
- SmallVector<std::pair<unsigned, MachineInstr *>>
- SingleUseProducerPositions;
-
- unsigned VALUInstrCount = 0;
- for (MachineInstr &MI : reverse(MBB.instrs())) {
- // All registers in all operands need to be single use for an
- // instruction to be marked as a single use producer.
- bool AllProducerOperandsAreSingleUse = true;
-
- // Gather a list of Registers used before updating use counts to avoid
- // double counting registers that appear multiple times in a single
- // MachineInstr.
- SmallVector<MCRegUnit> RegistersUsed;
-
- for (const auto &Operand : MI.all_defs()) {
- const auto Reg = Operand.getReg();
-
- const auto RegUnits = TRI->regunits(Reg);
- if (any_of(RegUnits, [&RegisterUseCount](const MCRegUnit Unit) {
- return RegisterUseCount[Unit] > 1;
- }))
- AllProducerOperandsAreSingleUse = false;
-
- // Reset uses count when a register is no longer live.
- for (const MCRegUnit Unit : RegUnits)
- RegisterUseCount.erase(Unit);
- }
-
- for (const auto &Operand : MI.all_uses()) {
- const auto Reg = Operand.getReg();
-
- // Count the number of times each register is read.
- for (const MCRegUnit Unit : TRI->regunits(Reg)) {
- if (!is_contained(RegistersUsed, Unit))
- RegistersUsed.push_back(Unit);
- }
- }
- for (const MCRegUnit Unit : RegistersUsed)
- RegisterUseCount[Unit]++;
-
- // Do not attempt to optimise across exec mask changes.
- if (MI.modifiesRegister(AMDGPU::EXEC, TRI) ||
- AMDGPU::isInvalidSingleUseConsumerInst(MI.getOpcode())) {
- for (auto &UsedReg : RegisterUseCount)
- UsedReg.second = 2;
- }
-
- if (!SIInstrInfo::isVALU(MI) ||
- AMDGPU::isInvalidSingleUseProducerInst(MI.getOpcode()))
- continue;
- if (AllProducerOperandsAreSingleUse) {
- SingleUseProducerPositions.push_back({VALUInstrCount, &MI});
- InstructionEmitted = true;
- }
- VALUInstrCount++;
- }
- insertSingleUseInstructions(SingleUseProducerPositions);
- }
- return InstructionEmitted;
- }
-};
-} // namespace
-
-char AMDGPUInsertSingleUseVDST::ID = 0;
-
-char &llvm::AMDGPUInsertSingleUseVDSTID = AMDGPUInsertSingleUseVDST::ID;
-
-INITIALIZE_PASS(AMDGPUInsertSingleUseVDST, DEBUG_TYPE,
- "AMDGPU Insert SingleUseVDST", false, false)
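Note (editor's addition, not part of this commit): the immediate operand that the deleted pass built for s_singleuse_vdst packs up to three single-use counts separated by up to two skip counts into the 16-bit simm16 (4 + 3 + 3 + 3 + 3 bits, per the encodeImm routine removed above). Below is a minimal standalone sketch of that encoding; the helper name encodeSingleUseImm and the values in main are made up for illustration.

// Standalone sketch mirroring encodeImm() from the deleted pass: the low
// 4 bits hold the single-use count of the region at index SkipRegions.size();
// earlier regions follow as alternating 3-bit skip and 3-bit single-use
// fields. Names and example values are illustrative only.
#include <array>
#include <cassert>
#include <cstdio>
#include <vector>

static unsigned encodeSingleUseImm(const std::array<unsigned, 3> &SingleUseRegions,
                                   const std::vector<unsigned> &SkipRegions) {
  assert(SkipRegions.size() <= 2 && "at most two skip regions are encodable");
  unsigned Imm = SingleUseRegions[SkipRegions.size()];
  unsigned Shift = 4;
  for (unsigned I = SkipRegions.size(); I > 0; --I) {
    Imm |= SkipRegions[I - 1] << Shift;      // 3-bit skip count
    Shift += 3;
    Imm |= SingleUseRegions[I - 1] << Shift; // 3-bit single-use count
    Shift += 3;
  }
  return Imm;
}

int main() {
  // Region 0 holds two single-use producers, then a skip of three
  // instructions, then region 1 holds one producer:
  // 1 | (3 << 4) | (2 << 7) == 0x131.
  std::printf("s_singleuse_vdst 0x%x\n", encodeSingleUseImm({2, 1, 0}, {3}));
  return 0;
}
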
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 04fdee0..abd5074 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -311,12 +311,6 @@ static cl::opt<bool> EnableSIModeRegisterPass(
cl::init(true),
cl::Hidden);
-// Enable GFX11.5+ s_singleuse_vdst insertion
-static cl::opt<bool>
- EnableInsertSingleUseVDST("amdgpu-enable-single-use-vdst",
- cl::desc("Enable s_singleuse_vdst insertion"),
- cl::init(false), cl::Hidden);
-
// Enable GFX11+ s_delay_alu insertion
static cl::opt<bool>
EnableInsertDelayAlu("amdgpu-enable-delay-alu",
@@ -450,7 +444,6 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
initializeAMDGPURewriteUndefForPHILegacyPass(*PR);
initializeAMDGPUUnifyMetadataPass(*PR);
initializeSIAnnotateControlFlowLegacyPass(*PR);
- initializeAMDGPUInsertSingleUseVDSTPass(*PR);
initializeAMDGPUInsertDelayAluPass(*PR);
initializeSIInsertHardClausesPass(*PR);
initializeSIInsertWaitcntsPass(*PR);
@@ -1518,9 +1511,6 @@ void GCNPassConfig::addPreEmitPass() {
// cases.
addPass(&PostRAHazardRecognizerID);
- if (isPassEnabled(EnableInsertSingleUseVDST, CodeGenOptLevel::Less))
- addPass(&AMDGPUInsertSingleUseVDSTID);
-
if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less))
addPass(&AMDGPUInsertDelayAluID);
diff --git a/llvm/lib/Target/AMDGPU/CMakeLists.txt b/llvm/lib/Target/AMDGPU/CMakeLists.txt
index e813653..7c883cc 100644
--- a/llvm/lib/Target/AMDGPU/CMakeLists.txt
+++ b/llvm/lib/Target/AMDGPU/CMakeLists.txt
@@ -81,7 +81,6 @@ add_llvm_target(AMDGPUCodeGen
AMDGPUMCInstLower.cpp
AMDGPUMemoryUtils.cpp
AMDGPUIGroupLP.cpp
- AMDGPUInsertSingleUseVDST.cpp
AMDGPUMarkLastScratchLoad.cpp
AMDGPUMIRFormatter.cpp
AMDGPUOpenCLEnqueuedBlockLowering.cpp
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index a4ae8a1..e6b7342 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -215,7 +215,6 @@ protected:
bool HasPackedTID = false;
bool ScalarizeGlobal = false;
bool HasSALUFloatInsts = false;
- bool HasVGPRSingleUseHintInsts = false;
bool HasPseudoScalarTrans = false;
bool HasRestrictedSOffset = false;
@@ -1280,8 +1279,6 @@ public:
bool hasSALUFloatInsts() const { return HasSALUFloatInsts; }
- bool hasVGPRSingleUseHintInsts() const { return HasVGPRSingleUseHintInsts; }
-
bool hasPseudoScalarTrans() const { return HasPseudoScalarTrans; }
bool hasRestrictedSOffset() const { return HasRestrictedSOffset; }
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index c016be2..087ca1f 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -2409,8 +2409,6 @@ class VOPProfile <list<ValueType> _ArgVT, bit _EnableClamp = 0> {
field bit EnableClamp = _EnableClamp;
field bit IsTrue16 = 0;
field bit IsRealTrue16 = 0;
- field bit IsInvalidSingleUseConsumer = 0;
- field bit IsInvalidSingleUseProducer = 0;
field ValueType DstVT = ArgVT[0];
field ValueType Src0VT = ArgVT[1];
diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index 2e73a1a..9da27a7 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -1752,11 +1752,6 @@ let OtherPredicates = [HasExportInsts] in
"$simm16">;
} // End SubtargetPredicate = isGFX11Plus
-let SubtargetPredicate = HasVGPRSingleUseHintInsts in {
- def S_SINGLEUSE_VDST :
- SOPP_Pseudo<"s_singleuse_vdst", (ins s16imm:$simm16), "$simm16">;
-} // End SubtargetPredicate = HasVGPRSingeUseHintInsts
-
let SubtargetPredicate = isGFX12Plus, hasSideEffects = 1 in {
def S_WAIT_LOADCNT :
SOPP_Pseudo<"s_wait_loadcnt", (ins s16imm:$simm16), "$simm16",
@@ -2677,12 +2672,6 @@ defm S_ICACHE_INV : SOPP_Real_32_gfx11_gfx12<0x03c>;
defm S_BARRIER : SOPP_Real_32_gfx11<0x03d>;
//===----------------------------------------------------------------------===//
-// SOPP - GFX1150, GFX12.
-//===----------------------------------------------------------------------===//
-
-defm S_SINGLEUSE_VDST : SOPP_Real_32_gfx11_gfx12<0x013>;
-
-//===----------------------------------------------------------------------===//
// SOPP - GFX6, GFX7, GFX8, GFX9, GFX10
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index 8b5ec87..f32c82f 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -379,12 +379,6 @@ struct VOPTrue16Info {
bool IsTrue16;
};
-struct SingleUseExceptionInfo {
- uint16_t Opcode;
- bool IsInvalidSingleUseConsumer;
- bool IsInvalidSingleUseProducer;
-};
-
struct FP8DstByteSelInfo {
uint16_t Opcode;
bool HasFP8DstByteSel;
@@ -396,8 +390,6 @@ struct FP8DstByteSelInfo {
#define GET_MTBUFInfoTable_IMPL
#define GET_MUBUFInfoTable_DECL
#define GET_MUBUFInfoTable_IMPL
-#define GET_SingleUseExceptionTable_DECL
-#define GET_SingleUseExceptionTable_IMPL
#define GET_SMInfoTable_DECL
#define GET_SMInfoTable_IMPL
#define GET_VOP1InfoTable_DECL
@@ -626,16 +618,6 @@ bool isTrue16Inst(unsigned Opc) {
return Info ? Info->IsTrue16 : false;
}
-bool isInvalidSingleUseConsumerInst(unsigned Opc) {
- const SingleUseExceptionInfo *Info = getSingleUseExceptionHelper(Opc);
- return Info && Info->IsInvalidSingleUseConsumer;
-}
-
-bool isInvalidSingleUseProducerInst(unsigned Opc) {
- const SingleUseExceptionInfo *Info = getSingleUseExceptionHelper(Opc);
- return Info && Info->IsInvalidSingleUseProducer;
-}
-
bool isFP8DstSelInst(unsigned Opc) {
const FP8DstByteSelInfo *Info = getFP8DstByteSelHelper(Opc);
return Info ? Info->HasFP8DstByteSel : false;
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index 35c080d..da37534 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -870,6 +870,8 @@ bool isInvalidSingleUseConsumerInst(unsigned Opc);
LLVM_READONLY
bool isInvalidSingleUseProducerInst(unsigned Opc);
+bool isDPMACCInstruction(unsigned Opc);
+
LLVM_READONLY
unsigned mapWMMA2AddrTo3AddrOpcode(unsigned Opc);
diff --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
index 33f2f9f..bd80505 100644
--- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
@@ -252,7 +252,6 @@ def VOP_READFIRSTLANE : VOPProfile <[i32, i32, untyped, untyped]> {
def V_READFIRSTLANE_B32 : VOP1_Pseudo <"v_readfirstlane_b32", VOP_READFIRSTLANE,
[], 1> {
let isConvergent = 1;
- let IsInvalidSingleUseConsumer = 1;
}
foreach vt = Reg32Types.types in {
@@ -375,7 +374,6 @@ defm V_CLREXCP : VOP1Inst <"v_clrexcp", VOP_NO_EXT<VOP_NONE>>;
def VOP_MOVRELS : VOPProfile<[i32, i32, untyped, untyped]> {
let Src0RC32 = VRegSrc_32;
let Src0RC64 = VRegSrc_32;
- let IsInvalidSingleUseConsumer = 1;
}
// Special case because there are no true output operands. Hack vdst
@@ -419,12 +417,8 @@ class VOP_MOVREL<RegisterOperand Src1RC> : VOPProfile<[untyped, i32, untyped, un
let EmitDst = 1; // force vdst emission
}
-let IsInvalidSingleUseProducer = 1 in {
- def VOP_MOVRELD : VOP_MOVREL<VSrc_b32>;
- def VOP_MOVRELSD : VOP_MOVREL<VRegSrc_32> {
- let IsInvalidSingleUseConsumer = 1;
- }
-}
+def VOP_MOVRELD : VOP_MOVREL<VSrc_b32>;
+def VOP_MOVRELSD : VOP_MOVREL<VRegSrc_32>;
let SubtargetPredicate = HasMovrel, Uses = [M0, EXEC] in {
// v_movreld_b32 is a special case because the destination output
@@ -541,7 +535,6 @@ let SubtargetPredicate = isGFX9Plus in {
let Constraints = "$vdst = $src1, $vdst1 = $src0";
let DisableEncoding = "$vdst1,$src1";
let SchedRW = [Write64Bit, Write64Bit];
- let IsInvalidSingleUseConsumer = 1;
}
let isReMaterializable = 1 in
@@ -708,8 +701,6 @@ let SubtargetPredicate = isGFX10Plus in {
let Constraints = "$vdst = $src1, $vdst1 = $src0";
let DisableEncoding = "$vdst1,$src1";
let SchedRW = [Write64Bit, Write64Bit];
- let IsInvalidSingleUseConsumer = 1;
- let IsInvalidSingleUseProducer = 1;
}
} // End Uses = [M0]
} // End SubtargetPredicate = isGFX10Plus
@@ -743,10 +734,7 @@ let SubtargetPredicate = isGFX11Plus in {
}
// Restrict src0 to be VGPR
def V_PERMLANE64_B32 : VOP1_Pseudo<"v_permlane64_b32", VOP_MOVRELS,
- [], /*VOP1Only=*/ 1> {
- let IsInvalidSingleUseConsumer = 1;
- let IsInvalidSingleUseProducer = 1;
- }
+ [], /*VOP1Only=*/ 1>;
defm V_MOV_B16 : VOP1Inst_t16<"v_mov_b16", VOP_I16_I16>;
defm V_NOT_B16 : VOP1Inst_t16<"v_not_b16", VOP_I16_I16>;
defm V_CVT_I32_I16 : VOP1Inst_t16<"v_cvt_i32_i16", VOP_I32_I16>;
diff --git a/llvm/lib/Target/AMDGPU/VOP2Instructions.td b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
index dd48607..52f7be3 100644
--- a/llvm/lib/Target/AMDGPU/VOP2Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
@@ -788,12 +788,10 @@ defm V_SUBREV_U32 : VOP2Inst <"v_subrev_u32", VOP_I32_I32_I32_ARITH, null_frag,
} // End isCommutable = 1
// These are special and do not read the exec mask.
-let isConvergent = 1, Uses = []<Register>, IsInvalidSingleUseConsumer = 1 in {
+let isConvergent = 1, Uses = []<Register> in {
def V_READLANE_B32 : VOP2_Pseudo<"v_readlane_b32", VOP_READLANE, []>;
let IsNeverUniform = 1, Constraints = "$vdst = $vdst_in", DisableEncoding="$vdst_in" in {
-def V_WRITELANE_B32 : VOP2_Pseudo<"v_writelane_b32", VOP_WRITELANE, []> {
- let IsInvalidSingleUseProducer = 1;
- }
+def V_WRITELANE_B32 : VOP2_Pseudo<"v_writelane_b32", VOP_WRITELANE, []>;
} // End IsNeverUniform, $vdst = $vdst_in, DisableEncoding $vdst_in
} // End isConvergent = 1
diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
index 466114b..20beb41 100644
--- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -157,12 +157,12 @@ defm V_MAX_F64 : VOP3Inst <"v_max_f64", VOP3_Profile<VOP_F64_F64_F64>, fmaxnum_l
} // End SubtargetPredicate = isNotGFX12Plus
} // End SchedRW = [WriteDoubleAdd]
-let SchedRW = [WriteIntMul], IsInvalidSingleUseConsumer = 1 in {
+let SchedRW = [WriteIntMul] in {
defm V_MUL_LO_U32 : VOP3Inst <"v_mul_lo_u32", V_MUL_PROF<VOP_I32_I32_I32>, DivergentBinFrag<mul>>;
defm V_MUL_HI_U32 : VOP3Inst <"v_mul_hi_u32", V_MUL_PROF<VOP_I32_I32_I32>, mulhu>;
defm V_MUL_LO_I32 : VOP3Inst <"v_mul_lo_i32", V_MUL_PROF<VOP_I32_I32_I32>>;
defm V_MUL_HI_I32 : VOP3Inst <"v_mul_hi_i32", V_MUL_PROF<VOP_I32_I32_I32>, mulhs>;
-} // End SchedRW = [WriteIntMul], IsInvalidSingleUseConsumer = 1
+} // End SchedRW = [WriteIntMul]
let SubtargetPredicate = isGFX12Plus, ReadsModeReg = 0 in {
defm V_MINIMUM_F32 : VOP3Inst <"v_minimum_f32", VOP3_Profile<VOP_F32_F32_F32>, DivergentBinFrag<fminimum>>;
@@ -260,9 +260,9 @@ let mayRaiseFPException = 0 in { // Seems suspicious but manual doesn't say it d
let isReMaterializable = 1 in
defm V_MSAD_U8 : VOP3Inst <"v_msad_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
-let Constraints = "@earlyclobber $vdst", IsInvalidSingleUseConsumer = 1 in {
+let Constraints = "@earlyclobber $vdst" in {
defm V_MQSAD_PK_U16_U8 : VOP3Inst <"v_mqsad_pk_u16_u8", VOP3_Profile<VOP_I64_I64_I32_I64, VOP3_CLAMP>>;
-} // End Constraints = "@earlyclobber $vdst", IsInvalidSingleUseConsumer = 1
+} // End Constraints = "@earlyclobber $vdst"
let isReMaterializable = 1 in {
@@ -277,16 +277,14 @@ let SchedRW = [Write64Bit] in {
defm V_ASHR_I64 : VOP3Inst <"v_ashr_i64", VOP3_Profile<VOP_I64_I64_I32>, csra_64>;
} // End SubtargetPredicate = isGFX6GFX7
- let IsInvalidSingleUseConsumer = 1 in {
let SubtargetPredicate = isGFX8Plus in {
defm V_LSHRREV_B64 : VOP3Inst <"v_lshrrev_b64", VOP3_Profile<VOP_I64_I32_I64>, clshr_rev_64>;
defm V_ASHRREV_I64 : VOP3Inst <"v_ashrrev_i64", VOP3_Profile<VOP_I64_I32_I64>, cashr_rev_64>;
- } // End SubtargetPredicate = isGFX8Plus, , IsInvalidSingleUseConsumer = 1
+ } // End SubtargetPredicate = isGFX8Plus
let SubtargetPredicate = isGFX8GFX9GFX10GFX11 in {
defm V_LSHLREV_B64 : VOP3Inst <"v_lshlrev_b64", VOP3_Profile<VOP_I64_I32_I64>, clshl_rev_64>;
} // End SubtargetPredicate = isGFX8GFX9GFX10GFX11
- } // End IsInvalidSingleUseConsumer = 1
} // End SchedRW = [Write64Bit]
} // End isReMaterializable = 1
@@ -311,14 +309,14 @@ def VOPProfileMQSAD : VOP3_Profile<VOP_V4I32_I64_I32_V4I32, VOP3_CLAMP> {
let HasModifiers = 0;
}
-let SubtargetPredicate = isGFX7Plus, IsInvalidSingleUseConsumer = 1 in {
+let SubtargetPredicate = isGFX7Plus in {
let Constraints = "@earlyclobber $vdst", SchedRW = [WriteQuarterRate32] in {
defm V_QSAD_PK_U16_U8 : VOP3Inst <"v_qsad_pk_u16_u8", VOP3_Profile<VOP_I64_I64_I32_I64, VOP3_CLAMP>>;
defm V_MQSAD_U32_U8 : VOP3Inst <"v_mqsad_u32_u8", VOPProfileMQSAD>;
} // End Constraints = "@earlyclobber $vdst", SchedRW = [WriteQuarterRate32]
-} // End SubtargetPredicate = isGFX7Plus, IsInvalidSingleUseConsumer = 1
+} // End SubtargetPredicate = isGFX7Plus
-let isCommutable = 1, SchedRW = [WriteIntMul, WriteSALU], IsInvalidSingleUseConsumer = 1 in {
+let isCommutable = 1, SchedRW = [WriteIntMul, WriteSALU] in {
let SubtargetPredicate = isGFX7Plus, OtherPredicates = [HasNotMADIntraFwdBug] in {
defm V_MAD_U64_U32 : VOP3Inst <"v_mad_u64_u32", VOP3b_I64_I1_I32_I32_I64>;
defm V_MAD_I64_I32 : VOP3Inst <"v_mad_i64_i32", VOP3b_I64_I1_I32_I32_I64>;
@@ -328,7 +326,7 @@ let isCommutable = 1, SchedRW = [WriteIntMul, WriteSALU], IsInvalidSingleUseCons
defm V_MAD_U64_U32_gfx11 : VOP3Inst <"v_mad_u64_u32", VOP3b_I64_I1_I32_I32_I64>;
defm V_MAD_I64_I32_gfx11 : VOP3Inst <"v_mad_i64_i32", VOP3b_I64_I1_I32_I32_I64>;
}
-} // End isCommutable = 1, SchedRW = [WriteIntMul, WriteSALU], IsInvalidSingleUseConsumer = 1
+} // End isCommutable = 1, SchedRW = [WriteIntMul, WriteSALU]
let FPDPRounding = 1 in {
@@ -865,10 +863,10 @@ let SubtargetPredicate = isGFX10Plus in {
} // End isCommutable = 1, isReMaterializable = 1
def : ThreeOp_i32_Pats<xor, xor, V_XOR3_B32_e64>;
- let Constraints = "$vdst = $vdst_in", DisableEncoding="$vdst_in", IsInvalidSingleUseConsumer = 1, IsInvalidSingleUseProducer = 1 in {
+ let Constraints = "$vdst = $vdst_in", DisableEncoding="$vdst_in" in {
defm V_PERMLANE16_B32 : VOP3Inst<"v_permlane16_b32", VOP3_PERMLANE_Profile>;
defm V_PERMLANEX16_B32 : VOP3Inst<"v_permlanex16_b32", VOP3_PERMLANE_Profile>;
- } // End $vdst = $vdst_in, DisableEncoding $vdst_in, IsInvalidSingleUseConsumer = 1, IsInvalidSingleUseProducer = 1
+ } // End $vdst = $vdst_in, DisableEncoding $vdst_in
foreach vt = Reg32Types.types in {
def : PermlanePat<int_amdgcn_permlane16, V_PERMLANE16_B32_e64, vt>;
@@ -1286,12 +1284,11 @@ let AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10" in {
}
} // End AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10"
-let IsInvalidSingleUseConsumer = 1 in {
- defm V_READLANE_B32 : VOP3_Real_No_Suffix_gfx10<0x360>;
- let InOperandList = (ins SSrcOrLds_b32:$src0, SCSrc_b32:$src1, VGPR_32:$vdst_in), IsInvalidSingleUseProducer = 1 in {
- defm V_WRITELANE_B32 : VOP3_Real_No_Suffix_gfx10<0x361>;
- } // End InOperandList = (ins SSrcOrLds_b32:$src0, SCSrc_b32: $src1, VGPR_32:$vdst_in), IsInvalidSingleUseProducer = 1
-} // End IsInvalidSingleUseConsumer = 1
+defm V_READLANE_B32 : VOP3_Real_No_Suffix_gfx10<0x360>;
+
+let InOperandList = (ins SSrcOrLds_b32:$src0, SCSrc_b32:$src1, VGPR_32:$vdst_in) in {
+ defm V_WRITELANE_B32 : VOP3_Real_No_Suffix_gfx10<0x361>;
+} // End InOperandList = (ins SSrcOrLds_b32:$src0, SCSrc_b32:$src1, VGPR_32:$vdst_in)
let SubtargetPredicate = isGFX10Before1030 in {
defm V_MUL_LO_I32 : VOP3_Real_gfx10<0x16b>;
diff --git a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
index f4d2c29..5eee718 100644
--- a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
@@ -382,19 +382,15 @@ defm V_DOT2_F32_F16 : VOP3PInst<"v_dot2_f32_f16",
AMDGPUfdot2, 1/*ExplicitClamp*/>;
let OtherPredicates = [HasDot7Insts] in {
-let IsInvalidSingleUseConsumer = 1 in {
- defm V_DOT4_U32_U8 : VOP3PInst<"v_dot4_u32_u8",
- VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_udot4, 1>;
-}
+defm V_DOT4_U32_U8 : VOP3PInst<"v_dot4_u32_u8",
+ VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_udot4, 1>;
defm V_DOT8_U32_U4 : VOP3PInst<"v_dot8_u32_u4",
VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_udot8, 1>;
} // End OtherPredicates = [HasDot7Insts]
let OtherPredicates = [HasDot1Insts] in {
-let IsInvalidSingleUseConsumer = 1 in {
- defm V_DOT4_I32_I8 : VOP3PInst<"v_dot4_i32_i8",
- VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_sdot4, 1>;
-}
+defm V_DOT4_I32_I8 : VOP3PInst<"v_dot4_i32_i8",
+ VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_sdot4, 1>;
defm V_DOT8_I32_I4 : VOP3PInst<"v_dot8_i32_i4",
VOP3P_Profile<VOP_I32_I32_I32_I32, VOP3_PACKED>, int_amdgcn_sdot8, 1>;
} // End OtherPredicates = [HasDot1Insts]
diff --git a/llvm/lib/Target/AMDGPU/VOPCInstructions.td b/llvm/lib/Target/AMDGPU/VOPCInstructions.td
index be862b4..d6e08dc 100644
--- a/llvm/lib/Target/AMDGPU/VOPCInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOPCInstructions.td
@@ -464,10 +464,9 @@ multiclass VOPC_I16 <string opName, SDPatternOperator cond = COND_NULL,
multiclass VOPC_I32 <string opName, SDPatternOperator cond = COND_NULL, string revOp = opName> :
VOPC_Pseudos <opName, VOPC_I1_I32_I32, cond, revOp, 0>;
-let IsInvalidSingleUseConsumer = 1 in {
- multiclass VOPC_I64 <string opName, SDPatternOperator cond = COND_NULL, string revOp = opName> :
- VOPC_Pseudos <opName, VOPC_I1_I64_I64, cond, revOp, 0>;
-}
+multiclass VOPC_I64 <string opName, SDPatternOperator cond = COND_NULL, string revOp = opName> :
+ VOPC_Pseudos <opName, VOPC_I1_I64_I64, cond, revOp, 0>;
+
multiclass VOPCX_F16<string opName, string revOp = opName> {
let OtherPredicates = [Has16BitInsts], True16Predicate = NotHasTrue16BitInsts in {
@@ -502,10 +501,8 @@ multiclass VOPCX_I16<string opName, string revOp = opName> {
multiclass VOPCX_I32 <string opName, string revOp = opName> :
VOPCX_Pseudos <opName, VOPC_I1_I32_I32, VOPC_I32_I32, COND_NULL, revOp>;
-let IsInvalidSingleUseConsumer = 1 in {
- multiclass VOPCX_I64 <string opName, string revOp = opName> :
- VOPCX_Pseudos <opName, VOPC_I1_I64_I64, VOPC_I64_I64, COND_NULL, revOp>;
-}
+multiclass VOPCX_I64 <string opName, string revOp = opName> :
+ VOPCX_Pseudos <opName, VOPC_I1_I64_I64, VOPC_I64_I64, COND_NULL, revOp>;
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/VOPInstructions.td b/llvm/lib/Target/AMDGPU/VOPInstructions.td
index 5a460ef..05a7d90 100644
--- a/llvm/lib/Target/AMDGPU/VOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOPInstructions.td
@@ -17,8 +17,6 @@ class LetDummies {
bit isReMaterializable;
bit isAsCheapAsAMove;
bit FPDPRounding;
- bit IsInvalidSingleUseConsumer;
- bit IsInvalidSingleUseProducer;
Predicate SubtargetPredicate;
string Constraints;
string DisableEncoding;
@@ -67,8 +65,6 @@ class VOP_Pseudo <string opName, string suffix, VOPProfile P, dag outs, dag ins,
string Mnemonic = opName;
Instruction Opcode = !cast<Instruction>(NAME);
bit IsTrue16 = P.IsTrue16;
- bit IsInvalidSingleUseConsumer = P.IsInvalidSingleUseConsumer;
- bit IsInvalidSingleUseProducer = P.IsInvalidSingleUseProducer;
VOPProfile Pfl = P;
string AsmOperands;
@@ -165,8 +161,6 @@ class VOP3P_Pseudo <string opName, VOPProfile P, list<dag> pattern = []> :
class VOP_Real<VOP_Pseudo ps> {
Instruction Opcode = !cast<Instruction>(NAME);
bit IsSingle = ps.Pfl.IsSingle;
- bit IsInvalidSingleUseConsumer = ps.Pfl.IsInvalidSingleUseConsumer;
- bit IsInvalidSingleUseProducer = ps.Pfl.IsInvalidSingleUseProducer;
}
class VOP3_Real <VOP_Pseudo ps, int EncodingFamily, string asm_name = ps.Mnemonic> :
@@ -844,9 +838,6 @@ class VOP_DPP_Pseudo <string OpName, VOPProfile P, list<dag> pattern=[],
let Constraints = !if(P.NumSrcArgs, P.TieRegDPP # " = $vdst", "");
let DisableEncoding = !if(P.NumSrcArgs, P.TieRegDPP, "");
let DecoderNamespace = "GFX8";
-
- let IsInvalidSingleUseConsumer = !not(VINTERP);
- let IsInvalidSingleUseProducer = !not(VINTERP);
}
class VOP3_DPP_Pseudo <string OpName, VOPProfile P> :
@@ -1714,13 +1705,4 @@ def VOPTrue16Table : GenericTable {
let PrimaryKey = ["Opcode"];
let PrimaryKeyName = "getTrue16OpcodeHelper";
-}
-
-def SingleUseExceptionTable : GenericTable {
- let FilterClass = "VOP_Pseudo";
- let CppTypeName = "SingleUseExceptionInfo";
- let Fields = ["Opcode", "IsInvalidSingleUseConsumer", "IsInvalidSingleUseProducer"];
-
- let PrimaryKey = ["Opcode"];
- let PrimaryKeyName = "getSingleUseExceptionHelper";
-}
+}
\ No newline at end of file