author    Matt Arsenault <Matthew.Arsenault@amd.com>  2021-02-19 08:57:14 -0500
committer Matt Arsenault <Matthew.Arsenault@amd.com>  2021-02-24 14:49:37 -0500
commit    78b6d73a93fc6085d2a2fc84bdce1bbde740cf16 (patch)
tree      8a29dfc5a447685c330c9709481deb0f08a82a6a /llvm/lib
parent    e79cd47e1620045562960ddfe17ab0c4f6e6628f (diff)
AMDGPU: Add even aligned VGPR/AGPR register classes
gfx90a operations require even-aligned registers, but this was previously achieved by reserving the odd-aligned tuples inside the full register class. Ideally the alignment requirement would be captured in the static instruction definitions for the operands, and we would have different instructions per subtarget.

The hackiest part is that we need to manually reassign AGPR register classes after instruction selection; we get away without this for VGPRs because the aligned classes are the ones registered for the legal vector types.
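As background for reading the SIISelLowering.cpp hunk below: the post-selection fixup reduces to one loop over the function's virtual registers that swaps each unaligned AGPR (and VGPR) tuple class for its _Align2 counterpart. This is a condensed sketch of that loop, not a standalone program; ST, TRI, and MRI are the locals of SITargetLowering::finalizeLowering shown in the diff.

  if (ST.needsAlignedVGPRs()) {
    // Walk every virtual register created during selection.
    for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
      const Register Reg = Register::index2VirtReg(I);
      const TargetRegisterClass *RC = MRI.getRegClassOrNull(Reg);
      if (!RC)
        continue; // Skip registers with no class assigned.
      // getAlignedAGPRClassID (defined in the patch) maps e.g.
      // AReg_64RegClassID to AReg_64_Align2RegClassID and returns -1
      // when no aligned variant exists for the class.
      int NewClassID = getAlignedAGPRClassID(RC->getID());
      if (NewClassID != -1)
        MRI.setRegClass(Reg, TRI->getRegClass(NewClassID));
    }
  }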
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp |   2
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNSubtarget.h                |   3
-rw-r--r--  llvm/lib/Target/AMDGPU/SIISelLowering.cpp            |  82
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.cpp               |  56
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstructions.td             |   4
-rw-r--r--  llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp      |  23
-rw-r--r--  llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp            | 118
-rw-r--r--  llvm/lib/Target/AMDGPU/SIRegisterInfo.h              |  28
-rw-r--r--  llvm/lib/Target/AMDGPU/SIRegisterInfo.td             |  59
-rw-r--r--  llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp      |  16
10 files changed, 297 insertions, 94 deletions
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 1ccef77..c1c83aa 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -2969,7 +2969,7 @@ bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD(
if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN ||
Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) {
- Register IdxReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass);
+ Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
.addReg(VIndex.getReg())
.addImm(AMDGPU::sub0)
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index c2f172a..c2d3491 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -930,6 +930,9 @@ public:
bool hasGFX90AInsts() const { return GFX90AInsts; }
+ /// Return if operations acting on VGPR tuples require even alignment.
+ bool needsAlignedVGPRs() const { return GFX90AInsts; }
+
bool hasPackedTID() const { return HasPackedTID; }
/// Return the maximum number of waves per SIMD for kernels using \p SGPRs
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index c36ed09..03e537e 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -80,36 +80,40 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass);
addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);
- addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
- addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);
+
+ const SIRegisterInfo *TRI = STI.getRegisterInfo();
+ const TargetRegisterClass *V64RegClass = TRI->getVGPR64Class();
+
+ addRegisterClass(MVT::f64, V64RegClass);
+ addRegisterClass(MVT::v2f32, V64RegClass);
addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass);
- addRegisterClass(MVT::v3f32, &AMDGPU::VReg_96RegClass);
+ addRegisterClass(MVT::v3f32, TRI->getVGPRClassForBitWidth(96));
addRegisterClass(MVT::v2i64, &AMDGPU::SGPR_128RegClass);
addRegisterClass(MVT::v2f64, &AMDGPU::SGPR_128RegClass);
addRegisterClass(MVT::v4i32, &AMDGPU::SGPR_128RegClass);
- addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
+ addRegisterClass(MVT::v4f32, TRI->getVGPRClassForBitWidth(128));
addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
- addRegisterClass(MVT::v5f32, &AMDGPU::VReg_160RegClass);
+ addRegisterClass(MVT::v5f32, TRI->getVGPRClassForBitWidth(160));
addRegisterClass(MVT::v8i32, &AMDGPU::SGPR_256RegClass);
- addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);
+ addRegisterClass(MVT::v8f32, TRI->getVGPRClassForBitWidth(256));
addRegisterClass(MVT::v4i64, &AMDGPU::SGPR_256RegClass);
- addRegisterClass(MVT::v4f64, &AMDGPU::VReg_256RegClass);
+ addRegisterClass(MVT::v4f64, TRI->getVGPRClassForBitWidth(256));
addRegisterClass(MVT::v16i32, &AMDGPU::SGPR_512RegClass);
- addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);
+ addRegisterClass(MVT::v16f32, TRI->getVGPRClassForBitWidth(512));
addRegisterClass(MVT::v8i64, &AMDGPU::SGPR_512RegClass);
- addRegisterClass(MVT::v8f64, &AMDGPU::VReg_512RegClass);
+ addRegisterClass(MVT::v8f64, TRI->getVGPRClassForBitWidth(512));
addRegisterClass(MVT::v16i64, &AMDGPU::SGPR_1024RegClass);
- addRegisterClass(MVT::v16f64, &AMDGPU::VReg_1024RegClass);
+ addRegisterClass(MVT::v16f64, TRI->getVGPRClassForBitWidth(1024));
if (Subtarget->has16BitInsts()) {
addRegisterClass(MVT::i16, &AMDGPU::SReg_32RegClass);
@@ -123,7 +127,7 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
}
addRegisterClass(MVT::v32i32, &AMDGPU::VReg_1024RegClass);
- addRegisterClass(MVT::v32f32, &AMDGPU::VReg_1024RegClass);
+ addRegisterClass(MVT::v32f32, TRI->getVGPRClassForBitWidth(1024));
computeRegisterProperties(Subtarget->getRegisterInfo());
@@ -11334,9 +11338,11 @@ MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
//===----------------------------------------------------------------------===//
std::pair<unsigned, const TargetRegisterClass *>
-SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI_,
StringRef Constraint,
MVT VT) const {
+ const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(TRI_);
+
const TargetRegisterClass *RC = nullptr;
if (Constraint.size() == 1) {
const unsigned BitWidth = VT.getSizeInBits();
@@ -11365,7 +11371,7 @@ SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
RC = &AMDGPU::VGPR_32RegClass;
break;
default:
- RC = SIRegisterInfo::getVGPRClassForBitWidth(BitWidth);
+ RC = TRI->getVGPRClassForBitWidth(BitWidth);
if (!RC)
return std::make_pair(0U, nullptr);
break;
@@ -11379,7 +11385,7 @@ SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
RC = &AMDGPU::AGPR_32RegClass;
break;
default:
- RC = SIRegisterInfo::getAGPRClassForBitWidth(BitWidth);
+ RC = TRI->getAGPRClassForBitWidth(BitWidth);
if (!RC)
return std::make_pair(0U, nullptr);
break;
@@ -11552,6 +11558,35 @@ bool SITargetLowering::checkAsmConstraintValA(SDValue Op,
return false;
}
+static int getAlignedAGPRClassID(unsigned UnalignedClassID) {
+ switch (UnalignedClassID) {
+ case AMDGPU::VReg_64RegClassID:
+ return AMDGPU::VReg_64_Align2RegClassID;
+ case AMDGPU::VReg_96RegClassID:
+ return AMDGPU::VReg_96_Align2RegClassID;
+ case AMDGPU::VReg_128RegClassID:
+ return AMDGPU::VReg_128_Align2RegClassID;
+ case AMDGPU::VReg_256RegClassID:
+ return AMDGPU::VReg_256_Align2RegClassID;
+ case AMDGPU::VReg_512RegClassID:
+ return AMDGPU::VReg_512_Align2RegClassID;
+ case AMDGPU::AReg_64RegClassID:
+ return AMDGPU::AReg_64_Align2RegClassID;
+ case AMDGPU::AReg_96RegClassID:
+ return AMDGPU::AReg_96_Align2RegClassID;
+ case AMDGPU::AReg_128RegClassID:
+ return AMDGPU::AReg_128_Align2RegClassID;
+ case AMDGPU::AReg_256RegClassID:
+ return AMDGPU::AReg_256_Align2RegClassID;
+ case AMDGPU::AReg_512RegClassID:
+ return AMDGPU::AReg_512_Align2RegClassID;
+ case AMDGPU::AReg_1024RegClassID:
+ return AMDGPU::AReg_1024_Align2RegClassID;
+ default:
+ return -1;
+ }
+}
+
// Figure out which registers should be reserved for stack access. Only after
// the function is legalized do we know all of the non-spill stack objects or if
// calls are present.
@@ -11560,6 +11595,7 @@ void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
+ const SIInstrInfo *TII = ST.getInstrInfo();
if (Info->isEntryFunction()) {
// Callable functions have fixed registers used for stack access.
@@ -11582,7 +11618,6 @@ void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
Info->limitOccupancy(MF);
if (ST.isWave32() && !MF.empty()) {
- const SIInstrInfo *TII = ST.getInstrInfo();
for (auto &MBB : MF) {
for (auto &MI : MBB) {
TII->fixImplicitOperands(MI);
@@ -11590,6 +11625,23 @@ void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
}
}
+ // FIXME: This is a hack to fixup AGPR classes to use the properly aligned
+ // classes if required. Ideally the register class constraints would differ
+ // per-subtarget, but there's no easy way to achieve that right now. This is
+ // not a problem for VGPRs because the correctly aligned VGPR class is implied
+ // from using them as the register class for legal types.
+ if (ST.needsAlignedVGPRs()) {
+ for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
+ const Register Reg = Register::index2VirtReg(I);
+ const TargetRegisterClass *RC = MRI.getRegClassOrNull(Reg);
+ if (!RC)
+ continue;
+ int NewClassID = getAlignedAGPRClassID(RC->getID());
+ if (NewClassID != -1)
+ MRI.setRegClass(Reg, TRI->getRegClass(NewClassID));
+ }
+ }
+
TargetLoweringBase::finalizeLowering(MF);
// Allocate a VGPR for future SGPR Spill if
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 162bbd7..46e213b 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -872,7 +872,7 @@ void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
return;
}
- if (RC == &AMDGPU::VReg_64RegClass &&
+ if (RC->hasSuperClassEq(&AMDGPU::VReg_64RegClass) &&
!RI.hasAGPRs(RI.getPhysRegClass(SrcReg))) {
if (ST.hasPackedFP32Ops()) {
BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), DestReg)
@@ -1021,7 +1021,7 @@ void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB,
.addImm(Value);
return;
}
- if (RegClass == &AMDGPU::VReg_64RegClass) {
+ if (RegClass->hasSuperClassEq(&AMDGPU::VReg_64RegClass)) {
BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg)
.addImm(Value);
return;
@@ -3776,7 +3776,8 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
// Make sure the register classes are correct.
for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
- if (MI.getOperand(i).isFPImm()) {
+ const MachineOperand &MO = MI.getOperand(i);
+ if (MO.isFPImm()) {
ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
"all fp values to integers.";
return false;
@@ -3805,7 +3806,6 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
case AMDGPU::OPERAND_REG_INLINE_AC_FP64: {
- const MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) {
ErrInfo = "Illegal immediate value for operand.";
return false;
@@ -3826,12 +3826,40 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
continue;
}
- if (!MI.getOperand(i).isReg())
+ if (!MO.isReg())
+ continue;
+ Register Reg = MO.getReg();
+ if (!Reg)
continue;
+ // FIXME: Ideally we would have separate instruction definitions with the
+ // aligned register constraint.
+ // FIXME: We do not verify inline asm operands, but custom inline asm
+ // verification is broken anyway
+ if (ST.needsAlignedVGPRs()) {
+ const TargetRegisterClass *RC = RI.getRegClassForReg(MRI, Reg);
+ const bool IsVGPR = RI.hasVGPRs(RC);
+ const bool IsAGPR = !IsVGPR && RI.hasAGPRs(RC);
+ if ((IsVGPR || IsAGPR) && MO.getSubReg()) {
+ const TargetRegisterClass *SubRC =
+ RI.getSubRegClass(RC, MO.getSubReg());
+ RC = RI.getCompatibleSubRegClass(RC, SubRC, MO.getSubReg());
+ if (RC)
+ RC = SubRC;
+ }
+
+ // Check that this is the aligned version of the class.
+ if (!RC || ((IsVGPR && !RC->hasSuperClassEq(RI.getVGPRClassForBitWidth(
+ RI.getRegSizeInBits(*RC)))) ||
+ (IsAGPR && !RC->hasSuperClassEq(RI.getAGPRClassForBitWidth(
+ RI.getRegSizeInBits(*RC)))))) {
+ ErrInfo = "Subtarget requires even aligned vector registers";
+ return false;
+ }
+ }
+
if (RegClass != -1) {
- Register Reg = MI.getOperand(i).getReg();
- if (Reg == AMDGPU::NoRegister || Reg.isVirtual())
+ if (Reg.isVirtual())
continue;
const TargetRegisterClass *RC = RI.getRegClass(RegClass);
@@ -4320,9 +4348,12 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
if (Opcode != AMDGPU::V_MOV_B64_DPP_PSEUDO &&
((DstIdx >= 0 &&
- Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64RegClassID) ||
- ((Src0Idx >= 0 &&
- Desc.OpInfo[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID))) &&
+ (Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64RegClassID ||
+ Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64_Align2RegClassID)) ||
+ ((Src0Idx >= 0 &&
+ (Desc.OpInfo[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID ||
+ Desc.OpInfo[Src0Idx].RegClass ==
+ AMDGPU::VReg_64_Align2RegClassID)))) &&
!AMDGPU::isLegal64BitDPPControl(DC)) {
ErrInfo = "Invalid dpp_ctrl value: "
"64 bit dpp only support row_newbcast";
@@ -4532,8 +4563,9 @@ void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const {
Opcode = (Size == 64) ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
- if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC))
- VRC = &AMDGPU::VReg_64RegClass;
+ const TargetRegisterClass *VRC64 = RI.getVGPR64Class();
+ if (RI.getCommonSubClass(VRC64, VRC))
+ VRC = VRC64;
else
VRC = &AMDGPU::VGPR_32RegClass;
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 2482649..fcb803e 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -2299,7 +2299,7 @@ def : GCNPat <
def : GCNPat <
(i64 (int_amdgcn_mov_dpp i64:$src, timm:$dpp_ctrl, timm:$row_mask,
timm:$bank_mask, timm:$bound_ctrl)),
- (V_MOV_B64_DPP_PSEUDO VReg_64:$src, VReg_64:$src,
+ (V_MOV_B64_DPP_PSEUDO VReg_64_Align2:$src, VReg_64_Align2:$src,
(as_i32timm $dpp_ctrl), (as_i32timm $row_mask),
(as_i32timm $bank_mask),
(as_i1timm $bound_ctrl))
@@ -2308,7 +2308,7 @@ def : GCNPat <
def : GCNPat <
(i64 (int_amdgcn_update_dpp i64:$old, i64:$src, timm:$dpp_ctrl, timm:$row_mask,
timm:$bank_mask, timm:$bound_ctrl)),
- (V_MOV_B64_DPP_PSEUDO VReg_64:$old, VReg_64:$src, (as_i32timm $dpp_ctrl),
+ (V_MOV_B64_DPP_PSEUDO VReg_64_Align2:$old, VReg_64_Align2:$src, (as_i32timm $dpp_ctrl),
(as_i32timm $row_mask), (as_i32timm $bank_mask),
(as_i1timm $bound_ctrl))
>;
diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index 2bdf622..8f970a9 100644
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -1612,26 +1612,11 @@ SILoadStoreOptimizer::getTargetRegisterClass(const CombineInfo &CI,
return &AMDGPU::SGPR_512RegClass;
}
}
- const TargetRegisterClass *RC = nullptr;
- switch (CI.Width + Paired.Width) {
- default:
- return nullptr;
- case 2:
- RC = &AMDGPU::VReg_64RegClass;
- break;
- case 3:
- RC = &AMDGPU::VReg_96RegClass;
- break;
- case 4:
- RC = &AMDGPU::VReg_128RegClass;
- break;
- }
-
- if (TRI->hasAGPRs(getDataRegClass(*CI.I)))
- return TRI->getEquivalentAGPRClass(RC);
-
- return RC;
+ unsigned BitWidth = 32 * (CI.Width + Paired.Width);
+ return TRI->hasAGPRs(getDataRegClass(*CI.I))
+ ? TRI->getAGPRClassForBitWidth(BitWidth)
+ : TRI->getVGPRClassForBitWidth(BitWidth);
}
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferStorePair(
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index bb2ac72..3129d5b 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -345,13 +345,6 @@ BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
reserveRegisterTuples(Reserved, Reg);
}
- if (ST.hasGFX90AInsts())
- for (const TargetRegisterClass *RC : this->regclasses())
- if (getRegSizeInBits(*RC) > 32 && hasVectorRegisters(RC))
- for (unsigned Reg : *RC)
- if (getEncodingValue(Reg) & 1)
- Reserved.set(Reg);
-
// FIXME: Stop using reserved registers for this.
for (MCPhysReg Reg : MFI->getAGPRSpillVGPRs())
reserveRegisterTuples(Reserved, Reg);
@@ -1763,14 +1756,8 @@ StringRef SIRegisterInfo::getRegAsmName(MCRegister Reg) const {
return AMDGPUInstPrinter::getRegisterName(Reg);
}
-const TargetRegisterClass *
-SIRegisterInfo::getVGPRClassForBitWidth(unsigned BitWidth) {
- if (BitWidth == 1)
- return &AMDGPU::VReg_1RegClass;
- if (BitWidth <= 16)
- return &AMDGPU::VGPR_LO16RegClass;
- if (BitWidth <= 32)
- return &AMDGPU::VGPR_32RegClass;
+static const TargetRegisterClass *
+getAnyVGPRClassForBitWidth(unsigned BitWidth) {
if (BitWidth <= 64)
return &AMDGPU::VReg_64RegClass;
if (BitWidth <= 96)
@@ -1791,12 +1778,42 @@ SIRegisterInfo::getVGPRClassForBitWidth(unsigned BitWidth) {
return nullptr;
}
+static const TargetRegisterClass *
+getAlignedVGPRClassForBitWidth(unsigned BitWidth) {
+ if (BitWidth <= 64)
+ return &AMDGPU::VReg_64_Align2RegClass;
+ if (BitWidth <= 96)
+ return &AMDGPU::VReg_96_Align2RegClass;
+ if (BitWidth <= 128)
+ return &AMDGPU::VReg_128_Align2RegClass;
+ if (BitWidth <= 160)
+ return &AMDGPU::VReg_160_Align2RegClass;
+ if (BitWidth <= 192)
+ return &AMDGPU::VReg_192_Align2RegClass;
+ if (BitWidth <= 256)
+ return &AMDGPU::VReg_256_Align2RegClass;
+ if (BitWidth <= 512)
+ return &AMDGPU::VReg_512_Align2RegClass;
+ if (BitWidth <= 1024)
+ return &AMDGPU::VReg_1024_Align2RegClass;
+
+ return nullptr;
+}
+
const TargetRegisterClass *
-SIRegisterInfo::getAGPRClassForBitWidth(unsigned BitWidth) {
+SIRegisterInfo::getVGPRClassForBitWidth(unsigned BitWidth) const {
+ if (BitWidth == 1)
+ return &AMDGPU::VReg_1RegClass;
if (BitWidth <= 16)
- return &AMDGPU::AGPR_LO16RegClass;
+ return &AMDGPU::VGPR_LO16RegClass;
if (BitWidth <= 32)
- return &AMDGPU::AGPR_32RegClass;
+ return &AMDGPU::VGPR_32RegClass;
+ return ST.needsAlignedVGPRs() ? getAlignedVGPRClassForBitWidth(BitWidth)
+ : getAnyVGPRClassForBitWidth(BitWidth);
+}
+
+static const TargetRegisterClass *
+getAnyAGPRClassForBitWidth(unsigned BitWidth) {
if (BitWidth <= 64)
return &AMDGPU::AReg_64RegClass;
if (BitWidth <= 96)
@@ -1817,6 +1834,38 @@ SIRegisterInfo::getAGPRClassForBitWidth(unsigned BitWidth) {
return nullptr;
}
+static const TargetRegisterClass *
+getAlignedAGPRClassForBitWidth(unsigned BitWidth) {
+ if (BitWidth <= 64)
+ return &AMDGPU::AReg_64_Align2RegClass;
+ if (BitWidth <= 96)
+ return &AMDGPU::AReg_96_Align2RegClass;
+ if (BitWidth <= 128)
+ return &AMDGPU::AReg_128_Align2RegClass;
+ if (BitWidth <= 160)
+ return &AMDGPU::AReg_160_Align2RegClass;
+ if (BitWidth <= 192)
+ return &AMDGPU::AReg_192_Align2RegClass;
+ if (BitWidth <= 256)
+ return &AMDGPU::AReg_256_Align2RegClass;
+ if (BitWidth <= 512)
+ return &AMDGPU::AReg_512_Align2RegClass;
+ if (BitWidth <= 1024)
+ return &AMDGPU::AReg_1024_Align2RegClass;
+
+ return nullptr;
+}
+
+const TargetRegisterClass *
+SIRegisterInfo::getAGPRClassForBitWidth(unsigned BitWidth) const {
+ if (BitWidth <= 16)
+ return &AMDGPU::AGPR_LO16RegClass;
+ if (BitWidth <= 32)
+ return &AMDGPU::AGPR_32RegClass;
+ return ST.needsAlignedVGPRs() ? getAlignedAGPRClassForBitWidth(BitWidth)
+ : getAnyAGPRClassForBitWidth(BitWidth);
+}
+
const TargetRegisterClass *
SIRegisterInfo::getSGPRClassForBitWidth(unsigned BitWidth) {
if (BitWidth <= 16)
@@ -1855,29 +1904,46 @@ SIRegisterInfo::getPhysRegClass(MCRegister Reg) const {
&AMDGPU::VGPR_32RegClass,
&AMDGPU::SReg_32RegClass,
&AMDGPU::AGPR_32RegClass,
+ &AMDGPU::AGPR_32RegClass,
+ &AMDGPU::VReg_64_Align2RegClass,
&AMDGPU::VReg_64RegClass,
&AMDGPU::SReg_64RegClass,
+ &AMDGPU::AReg_64_Align2RegClass,
&AMDGPU::AReg_64RegClass,
+ &AMDGPU::VReg_96_Align2RegClass,
&AMDGPU::VReg_96RegClass,
&AMDGPU::SReg_96RegClass,
+ &AMDGPU::AReg_96_Align2RegClass,
&AMDGPU::AReg_96RegClass,
+ &AMDGPU::VReg_128_Align2RegClass,
&AMDGPU::VReg_128RegClass,
&AMDGPU::SReg_128RegClass,
+ &AMDGPU::AReg_128_Align2RegClass,
&AMDGPU::AReg_128RegClass,
+ &AMDGPU::VReg_160_Align2RegClass,
&AMDGPU::VReg_160RegClass,
&AMDGPU::SReg_160RegClass,
+ &AMDGPU::AReg_160_Align2RegClass,
&AMDGPU::AReg_160RegClass,
+ &AMDGPU::VReg_192_Align2RegClass,
&AMDGPU::VReg_192RegClass,
&AMDGPU::SReg_192RegClass,
+ &AMDGPU::AReg_192_Align2RegClass,
&AMDGPU::AReg_192RegClass,
+ &AMDGPU::VReg_256_Align2RegClass,
&AMDGPU::VReg_256RegClass,
&AMDGPU::SReg_256RegClass,
+ &AMDGPU::AReg_256_Align2RegClass,
&AMDGPU::AReg_256RegClass,
+ &AMDGPU::VReg_512_Align2RegClass,
&AMDGPU::VReg_512RegClass,
&AMDGPU::SReg_512RegClass,
+ &AMDGPU::AReg_512_Align2RegClass,
&AMDGPU::AReg_512RegClass,
&AMDGPU::SReg_1024RegClass,
+ &AMDGPU::VReg_1024_Align2RegClass,
&AMDGPU::VReg_1024RegClass,
+ &AMDGPU::AReg_1024_Align2RegClass,
&AMDGPU::AReg_1024RegClass,
&AMDGPU::SCC_CLASSRegClass,
&AMDGPU::Pseudo_SReg_32RegClass,
@@ -1977,6 +2043,16 @@ const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
return RC;
}
+const TargetRegisterClass *
+SIRegisterInfo::getCompatibleSubRegClass(const TargetRegisterClass *SuperRC,
+ const TargetRegisterClass *SubRC,
+ unsigned SubIdx) const {
+ // Ensure this subregister index is aligned in the super register.
+ const TargetRegisterClass *MatchRC =
+ getMatchingSuperRegClass(SuperRC, SubRC, SubIdx);
+ return MatchRC && MatchRC->hasSubClassEq(SuperRC) ? MatchRC : nullptr;
+}
+
bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
if (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST &&
OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST)
@@ -2182,6 +2258,12 @@ MCRegister SIRegisterInfo::getVCC() const {
return isWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
}
+const TargetRegisterClass *SIRegisterInfo::getVGPR64Class() const {
+ // VGPR tuples have an alignment requirement on gfx90a variants.
+ return ST.needsAlignedVGPRs() ? &AMDGPU::VReg_64_Align2RegClass
+ : &AMDGPU::VReg_64RegClass;
+}
+
const TargetRegisterClass *
SIRegisterInfo::getRegClass(unsigned RCID) const {
switch ((int)RCID) {
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
index fedac6a..e3910dd 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
@@ -134,8 +134,13 @@ public:
return getEncodingValue(Reg) & 0xff;
}
- static const TargetRegisterClass *getVGPRClassForBitWidth(unsigned BitWidth);
- static const TargetRegisterClass *getAGPRClassForBitWidth(unsigned BitWidth);
+ LLVM_READONLY
+ const TargetRegisterClass *getVGPRClassForBitWidth(unsigned BitWidth) const;
+
+ LLVM_READONLY
+ const TargetRegisterClass *getAGPRClassForBitWidth(unsigned BitWidth) const;
+
+ LLVM_READONLY
static const TargetRegisterClass *getSGPRClassForBitWidth(unsigned BitWidth);
/// Return the 'base' register class for this register.
@@ -182,12 +187,21 @@ public:
const TargetRegisterClass *
getEquivalentSGPRClass(const TargetRegisterClass *VRC) const;
- /// \returns The register class that is used for a sub-register of \p RC for
- /// the given \p SubIdx. If \p SubIdx equals NoSubRegister, \p RC will
- /// be returned.
+ /// \returns The canonical register class that is used for a sub-register of
+ /// \p RC for the given \p SubIdx. If \p SubIdx equals NoSubRegister, \p RC
+ /// will be returned.
const TargetRegisterClass *getSubRegClass(const TargetRegisterClass *RC,
unsigned SubIdx) const;
+ /// Returns a register class which is compatible with \p SuperRC, such that a
+ /// subregister exists with class \p SubRC with subregister index \p
+ /// SubIdx. If this is impossible (e.g., an unaligned subregister index within
+ /// a register tuple), return null.
+ const TargetRegisterClass *
+ getCompatibleSubRegClass(const TargetRegisterClass *SuperRC,
+ const TargetRegisterClass *SubRC,
+ unsigned SubIdx) const;
+
bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
unsigned DefSubReg,
const TargetRegisterClass *SrcRC,
@@ -268,6 +282,10 @@ public:
: &AMDGPU::SReg_64_XEXECRegClass;
}
+ // Return the appropriate register class to use for 64-bit VGPRs for the
+ // subtarget.
+ const TargetRegisterClass *getVGPR64Class() const;
+
MCRegister getVCC() const;
const TargetRegisterClass *getRegClass(unsigned RCID) const;
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
index 816e844..138224c7 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -786,7 +786,7 @@ def SReg_1024 : RegisterClass<"AMDGPU", [v32i32, v32f32, v16i64, v16f64], 32,
}
// Register class for all vector registers (VGPRs + Interpolation Registers)
-class VRegClass<int numRegs, list<ValueType> regTypes, dag regList> :
+class VRegClassBase<int numRegs, list<ValueType> regTypes, dag regList> :
RegisterClass<"AMDGPU", regTypes, 32, regList> {
let Size = !mul(numRegs, 32);
@@ -796,31 +796,46 @@ class VRegClass<int numRegs, list<ValueType> regTypes, dag regList> :
let Weight = numRegs;
}
-def VReg_64 : VRegClass<2, [i64, f64, v2i32, v2f32, v4f16, v4i16, p0, p1, p4],
- (add VGPR_64)>;
-def VReg_96 : VRegClass<3, [v3i32, v3f32], (add VGPR_96)>;
-def VReg_128 : VRegClass<4, [v4i32, v4f32, v2i64, v2f64], (add VGPR_128)>;
-def VReg_160 : VRegClass<5, [v5i32, v5f32], (add VGPR_160)>;
-def VReg_192 : VRegClass<6, [untyped], (add VGPR_192)>;
-def VReg_256 : VRegClass<8, [v8i32, v8f32, v4i64, v4f64], (add VGPR_256)>;
-def VReg_512 : VRegClass<16, [v16i32, v16f32, v8i64, v8f64], (add VGPR_512)>;
-def VReg_1024 : VRegClass<32, [v32i32, v32f32, v16i64, v16f64], (add VGPR_1024)>;
+// Define a register tuple class, along with one requiring an even
+// aligned base register.
+multiclass VRegClass<int numRegs, list<ValueType> regTypes, dag regList> {
+ // Define the regular class.
+ def "" : VRegClassBase<numRegs, regTypes, regList>;
-class ARegClass<int numRegs, list<ValueType> regTypes, dag regList> :
- VRegClass<numRegs, regTypes, regList> {
- // Requires n v_accvgpr_write and n v_accvgpr_read to copy + burn 1 vgpr
- let CopyCost = !add(numRegs, numRegs, 1);
+ // Define 2-aligned variant
+ def _Align2 : VRegClassBase<numRegs, regTypes, (decimate regList, 2)>;
}
-def AReg_64 : ARegClass<2, [i64, f64, v2i32, v2f32, v4f16, v4i16],
+defm VReg_64 : VRegClass<2, [i64, f64, v2i32, v2f32, v4f16, v4i16, p0, p1, p4],
+ (add VGPR_64)>;
+defm VReg_96 : VRegClass<3, [v3i32, v3f32], (add VGPR_96)>;
+defm VReg_128 : VRegClass<4, [v4i32, v4f32, v2i64, v2f64], (add VGPR_128)>;
+defm VReg_160 : VRegClass<5, [v5i32, v5f32], (add VGPR_160)>;
+
+defm VReg_192 : VRegClass<6, [untyped], (add VGPR_192)>;
+defm VReg_256 : VRegClass<8, [v8i32, v8f32, v4i64, v4f64], (add VGPR_256)>;
+defm VReg_512 : VRegClass<16, [v16i32, v16f32, v8i64, v8f64], (add VGPR_512)>;
+defm VReg_1024 : VRegClass<32, [v32i32, v32f32, v16i64, v16f64], (add VGPR_1024)>;
+
+multiclass ARegClass<int numRegs, list<ValueType> regTypes, dag regList> {
+ let CopyCost = !add(numRegs, numRegs, 1) in {
+ // Define the regular class.
+ def "" : VRegClassBase<numRegs, regTypes, regList>;
+
+ // Define 2-aligned variant
+ def _Align2 : VRegClassBase<numRegs, regTypes, (decimate regList, 2)>;
+ }
+}
+
+defm AReg_64 : ARegClass<2, [i64, f64, v2i32, v2f32, v4f16, v4i16],
(add AGPR_64)>;
-def AReg_96 : ARegClass<3, [v3i32, v3f32], (add AGPR_96)>;
-def AReg_128 : ARegClass<4, [v4i32, v4f32, v2i64, v2f64], (add AGPR_128)>;
-def AReg_160 : ARegClass<5, [v5i32, v5f32], (add AGPR_160)>;
-def AReg_192 : ARegClass<6, [untyped], (add AGPR_192)>;
-def AReg_256 : ARegClass<8, [v8i32, v8f32, v4i64, v4f64], (add AGPR_256)>;
-def AReg_512 : ARegClass<16, [v16i32, v16f32, v8i64, v8f64], (add AGPR_512)>;
-def AReg_1024 : ARegClass<32, [v32i32, v32f32, v16i64, v16f64], (add AGPR_1024)>;
+defm AReg_96 : ARegClass<3, [v3i32, v3f32], (add AGPR_96)>;
+defm AReg_128 : ARegClass<4, [v4i32, v4f32, v2i64, v2f64], (add AGPR_128)>;
+defm AReg_160 : ARegClass<5, [v5i32, v5f32], (add AGPR_160)>;
+defm AReg_192 : ARegClass<6, [untyped], (add AGPR_192)>;
+defm AReg_256 : ARegClass<8, [v8i32, v8f32, v4i64, v4f64], (add AGPR_256)>;
+defm AReg_512 : ARegClass<16, [v16i32, v16f32, v8i64, v8f64], (add AGPR_512)>;
+defm AReg_1024 : ARegClass<32, [v32i32, v32f32, v16i64, v16f64], (add AGPR_1024)>;
} // End GeneratePressureSet = 0
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index 8f9da58..e8d76cc 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -1449,44 +1449,60 @@ unsigned getRegBitWidth(unsigned RCID) {
case AMDGPU::VReg_64RegClassID:
case AMDGPU::AReg_64RegClassID:
case AMDGPU::SReg_64_XEXECRegClassID:
+ case AMDGPU::VReg_64_Align2RegClassID:
+ case AMDGPU::AReg_64_Align2RegClassID:
return 64;
case AMDGPU::SGPR_96RegClassID:
case AMDGPU::SReg_96RegClassID:
case AMDGPU::VReg_96RegClassID:
case AMDGPU::AReg_96RegClassID:
+ case AMDGPU::VReg_96_Align2RegClassID:
+ case AMDGPU::AReg_96_Align2RegClassID:
case AMDGPU::AV_96RegClassID:
return 96;
case AMDGPU::SGPR_128RegClassID:
case AMDGPU::SReg_128RegClassID:
case AMDGPU::VReg_128RegClassID:
case AMDGPU::AReg_128RegClassID:
+ case AMDGPU::VReg_128_Align2RegClassID:
+ case AMDGPU::AReg_128_Align2RegClassID:
case AMDGPU::AV_128RegClassID:
return 128;
case AMDGPU::SGPR_160RegClassID:
case AMDGPU::SReg_160RegClassID:
case AMDGPU::VReg_160RegClassID:
case AMDGPU::AReg_160RegClassID:
+ case AMDGPU::VReg_160_Align2RegClassID:
+ case AMDGPU::AReg_160_Align2RegClassID:
case AMDGPU::AV_160RegClassID:
return 160;
case AMDGPU::SGPR_192RegClassID:
case AMDGPU::SReg_192RegClassID:
case AMDGPU::VReg_192RegClassID:
case AMDGPU::AReg_192RegClassID:
+ case AMDGPU::VReg_192_Align2RegClassID:
+ case AMDGPU::AReg_192_Align2RegClassID:
return 192;
case AMDGPU::SGPR_256RegClassID:
case AMDGPU::SReg_256RegClassID:
case AMDGPU::VReg_256RegClassID:
case AMDGPU::AReg_256RegClassID:
+ case AMDGPU::VReg_256_Align2RegClassID:
+ case AMDGPU::AReg_256_Align2RegClassID:
return 256;
case AMDGPU::SGPR_512RegClassID:
case AMDGPU::SReg_512RegClassID:
case AMDGPU::VReg_512RegClassID:
case AMDGPU::AReg_512RegClassID:
+ case AMDGPU::VReg_512_Align2RegClassID:
+ case AMDGPU::AReg_512_Align2RegClassID:
return 512;
case AMDGPU::SGPR_1024RegClassID:
case AMDGPU::SReg_1024RegClassID:
case AMDGPU::VReg_1024RegClassID:
case AMDGPU::AReg_1024RegClassID:
+ case AMDGPU::VReg_1024_Align2RegClassID:
+ case AMDGPU::AReg_1024_Align2RegClassID:
return 1024;
default:
llvm_unreachable("Unexpected register class");