aboutsummaryrefslogtreecommitdiff
path: root/llvm/lib/Target
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp10
-rw-r--r--llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp2
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstrInfo.cpp29
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstrInfo.h5
-rw-r--r--llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp12
-rw-r--r--llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp2
-rw-r--r--llvm/lib/Target/AArch64/AArch64StackTagging.cpp1
-rw-r--r--llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp161
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPU.td2
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.h2
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp9
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp13
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h4
-rw-r--r--llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp13
-rw-r--r--llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp11
-rw-r--r--llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h2
-rw-r--r--llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp5
-rw-r--r--llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp14
-rw-r--r--llvm/lib/Target/AMDGPU/SIFoldOperands.cpp4
-rw-r--r--llvm/lib/Target/AMDGPU/SIISelLowering.cpp8
-rw-r--r--llvm/lib/Target/AMDGPU/SIInstrInfo.cpp61
-rw-r--r--llvm/lib/Target/AMDGPU/SIInstrInfo.h14
-rw-r--r--llvm/lib/Target/AMDGPU/SIInstrInfo.td6
-rw-r--r--llvm/lib/Target/AMDGPU/SIInstructions.td11
-rw-r--r--llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp6
-rw-r--r--llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp9
-rw-r--r--llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp2
-rw-r--r--llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp2
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp29
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h23
-rw-r--r--llvm/lib/Target/ARC/ARCInstrInfo.cpp12
-rw-r--r--llvm/lib/Target/ARC/ARCInstrInfo.h6
-rw-r--r--llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp106
-rw-r--r--llvm/lib/Target/ARM/ARMBaseInstrInfo.h11
-rw-r--r--llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp5
-rw-r--r--llvm/lib/Target/ARM/ARMConstantIslandPass.cpp1
-rw-r--r--llvm/lib/Target/ARM/ARMFrameLowering.cpp3
-rw-r--r--llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp8
-rw-r--r--llvm/lib/Target/ARM/ARMMachineFunctionInfo.h1
-rw-r--r--llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp2
-rw-r--r--llvm/lib/Target/ARM/MLxExpansionPass.cpp2
-rw-r--r--llvm/lib/Target/ARM/Thumb1InstrInfo.cpp11
-rw-r--r--llvm/lib/Target/ARM/Thumb1InstrInfo.h5
-rw-r--r--llvm/lib/Target/ARM/Thumb2InstrInfo.cpp26
-rw-r--r--llvm/lib/Target/ARM/Thumb2InstrInfo.h5
-rw-r--r--llvm/lib/Target/AVR/AVRInstrInfo.cpp12
-rw-r--r--llvm/lib/Target/AVR/AVRInstrInfo.h6
-rw-r--r--llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp4
-rw-r--r--llvm/lib/Target/AVR/Disassembler/AVRDisassembler.cpp4
-rw-r--r--llvm/lib/Target/AVR/MCTargetDesc/AVRMCCodeEmitter.cpp2
-rw-r--r--llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp2
-rw-r--r--llvm/lib/Target/BPF/BPFInstrInfo.cpp11
-rw-r--r--llvm/lib/Target/BPF/BPFInstrInfo.h5
-rw-r--r--llvm/lib/Target/CSKY/CSKYFrameLowering.cpp5
-rw-r--r--llvm/lib/Target/CSKY/CSKYInstrInfo.cpp16
-rw-r--r--llvm/lib/Target/CSKY/CSKYInstrInfo.h9
-rw-r--r--llvm/lib/Target/CSKY/CSKYSubtarget.cpp2
-rw-r--r--llvm/lib/Target/CSKY/CSKYSubtarget.h2
-rw-r--r--llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp4
-rw-r--r--llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp2
-rw-r--r--llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp4
-rw-r--r--llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp1
-rw-r--r--llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp15
-rw-r--r--llvm/lib/Target/Hexagon/HexagonGenMux.cpp1
-rw-r--r--llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp1
-rw-r--r--llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp11
-rw-r--r--llvm/lib/Target/Hexagon/HexagonInstrInfo.h5
-rw-r--r--llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp4
-rw-r--r--llvm/lib/Target/Hexagon/HexagonRDFOpt.cpp1
-rw-r--r--llvm/lib/Target/Hexagon/HexagonSubtarget.h2
-rw-r--r--llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp4
-rw-r--r--llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp20
-rw-r--r--llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.h38
-rw-r--r--llvm/lib/Target/Hexagon/RDFCopy.cpp1
-rw-r--r--llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp8
-rw-r--r--llvm/lib/Target/Lanai/LanaiInstrInfo.cpp6
-rw-r--r--llvm/lib/Target/Lanai/LanaiInstrInfo.h6
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchDeadRegisterDefinitions.cpp3
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchFrameLowering.cpp2
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp61
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchInstrInfo.h9
-rw-r--r--llvm/lib/Target/M68k/M68kInstrInfo.cpp14
-rw-r--r--llvm/lib/Target/M68k/M68kInstrInfo.h6
-rw-r--r--llvm/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp6
-rw-r--r--llvm/lib/Target/MSP430/MSP430InstrInfo.cpp13
-rw-r--r--llvm/lib/Target/MSP430/MSP430InstrInfo.h6
-rw-r--r--llvm/lib/Target/Mips/Mips16InstrInfo.cpp11
-rw-r--r--llvm/lib/Target/Mips/Mips16InstrInfo.h5
-rw-r--r--llvm/lib/Target/Mips/MipsInstrInfo.h15
-rw-r--r--llvm/lib/Target/Mips/MipsSEFrameLowering.cpp49
-rw-r--r--llvm/lib/Target/Mips/MipsSEInstrInfo.cpp43
-rw-r--r--llvm/lib/Target/Mips/MipsSEInstrInfo.h5
-rw-r--r--llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp1
-rw-r--r--llvm/lib/Target/PowerPC/PPCFrameLowering.cpp11
-rw-r--r--llvm/lib/Target/PowerPC/PPCInstrInfo.cpp23
-rw-r--r--llvm/lib/Target/PowerPC/PPCInstrInfo.h12
-rw-r--r--llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp4
-rw-r--r--llvm/lib/Target/RISCV/MCTargetDesc/RISCVFixupKinds.h1
-rw-r--r--llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp29
-rw-r--r--llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h2
-rw-r--r--llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp3
-rw-r--r--llvm/lib/Target/RISCV/RISCVFrameLowering.cpp28
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp35
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelLowering.cpp228
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelLowering.h3
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfo.cpp31
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfo.h6
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoP.td95
-rw-r--r--llvm/lib/Target/RISCV/RISCVRegisterInfo.td10
-rw-r--r--llvm/lib/Target/RISCV/RISCVSubtarget.cpp10
-rw-r--r--llvm/lib/Target/RISCV/RISCVSubtarget.h3
-rw-r--r--llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp22
-rw-r--r--llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp4
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVIRMapping.h2
-rw-r--r--llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp48
-rw-r--r--llvm/lib/Target/Sparc/SparcInstrInfo.cpp11
-rw-r--r--llvm/lib/Target/Sparc/SparcInstrInfo.h5
-rw-r--r--llvm/lib/Target/SystemZ/MCTargetDesc/SystemZInstPrinterCommon.cpp6
-rw-r--r--llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp16
-rw-r--r--llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp3
-rw-r--r--llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp14
-rw-r--r--llvm/lib/Target/SystemZ/SystemZInstrInfo.h6
-rw-r--r--llvm/lib/Target/VE/AsmParser/VEAsmParser.cpp76
-rw-r--r--llvm/lib/Target/VE/VEInstrInfo.cpp11
-rw-r--r--llvm/lib/Target/VE/VEInstrInfo.h6
-rw-r--r--llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp6
-rw-r--r--llvm/lib/Target/X86/X86DomainReassignment.cpp4
-rw-r--r--llvm/lib/Target/X86/X86FastPreTileConfig.cpp3
-rw-r--r--llvm/lib/Target/X86/X86FrameLowering.cpp7
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.cpp2
-rw-r--r--llvm/lib/Target/X86/X86InstrAVX512.td34
-rw-r--r--llvm/lib/Target/X86/X86InstrInfo.cpp63
-rw-r--r--llvm/lib/Target/X86/X86InstrInfo.h14
-rw-r--r--llvm/lib/Target/X86/X86OptimizeLEAs.cpp2
-rw-r--r--llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp3
-rw-r--r--llvm/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp6
-rw-r--r--llvm/lib/Target/XCore/XCoreFrameLowering.cpp5
-rw-r--r--llvm/lib/Target/XCore/XCoreInstrInfo.cpp5
-rw-r--r--llvm/lib/Target/XCore/XCoreInstrInfo.h6
-rw-r--r--llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp2
-rw-r--r--llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp18
-rw-r--r--llvm/lib/Target/Xtensa/XtensaInstrInfo.h5
142 files changed, 1263 insertions, 825 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
index cb831963..7712d2a 100644
--- a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
@@ -629,8 +629,7 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
}
const MCInstrDesc &MCID = TII->get(Opc);
// Create a dummy virtual register for the SUBS def.
- Register DestReg =
- MRI->createVirtualRegister(TII->getRegClass(MCID, 0, TRI));
+ Register DestReg = MRI->createVirtualRegister(TII->getRegClass(MCID, 0));
// Insert a SUBS Rn, #0 instruction instead of the cbz / cbnz.
BuildMI(*Head, Head->end(), TermDL, MCID)
.addReg(DestReg, RegState::Define | RegState::Dead)
@@ -638,8 +637,7 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
.addImm(0)
.addImm(0);
// SUBS uses the GPR*sp register classes.
- MRI->constrainRegClass(HeadCond[2].getReg(),
- TII->getRegClass(MCID, 1, TRI));
+ MRI->constrainRegClass(HeadCond[2].getReg(), TII->getRegClass(MCID, 1));
}
Head->splice(Head->end(), CmpBB, CmpBB->begin(), CmpBB->end());
@@ -686,10 +684,10 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(CmpBBTailCC);
const MCInstrDesc &MCID = TII->get(Opc);
MRI->constrainRegClass(CmpMI->getOperand(FirstOp).getReg(),
- TII->getRegClass(MCID, 0, TRI));
+ TII->getRegClass(MCID, 0));
if (CmpMI->getOperand(FirstOp + 1).isReg())
MRI->constrainRegClass(CmpMI->getOperand(FirstOp + 1).getReg(),
- TII->getRegClass(MCID, 1, TRI));
+ TII->getRegClass(MCID, 1));
MachineInstrBuilder MIB = BuildMI(*Head, CmpMI, CmpMI->getDebugLoc(), MCID)
.add(CmpMI->getOperand(FirstOp)); // Register Rn
if (isZBranch)
diff --git a/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp b/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
index 75361f5..4ff49a6 100644
--- a/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
+++ b/llvm/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
@@ -156,7 +156,7 @@ void AArch64DeadRegisterDefinitions::processMachineBasicBlock(
LLVM_DEBUG(dbgs() << " Ignoring, def is tied operand.\n");
continue;
}
- const TargetRegisterClass *RC = TII->getRegClass(Desc, I, TRI);
+ const TargetRegisterClass *RC = TII->getRegClass(Desc, I);
unsigned NewReg;
if (RC == nullptr) {
LLVM_DEBUG(dbgs() << " Ignoring, register is not a GPR.\n");
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 66e4949..b93e562 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -5664,7 +5664,6 @@ void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
Register SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
MachineFunction &MF = *MBB.getParent();
@@ -5678,7 +5677,7 @@ void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
bool Offset = true;
MCRegister PNRReg = MCRegister::NoRegister;
unsigned StackID = TargetStackID::Default;
- switch (TRI->getSpillSize(*RC)) {
+ switch (RI.getSpillSize(*RC)) {
case 1:
if (AArch64::FPR8RegClass.hasSubClassEq(RC))
Opc = AArch64::STRBui;
@@ -5841,10 +5840,12 @@ static void loadRegPairFromStackSlot(const TargetRegisterInfo &TRI,
.addMemOperand(MMO);
}
-void AArch64InstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void AArch64InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ Register DestReg, int FI,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = MF.getFrameInfo();
MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
@@ -5856,7 +5857,7 @@ void AArch64InstrInfo::loadRegFromStackSlot(
bool Offset = true;
unsigned StackID = TargetStackID::Default;
Register PNRReg = MCRegister::NoRegister;
- switch (TRI->getSpillSize(*RC)) {
+ switch (TRI.getSpillSize(*RC)) {
case 1:
if (AArch64::FPR8RegClass.hasSubClassEq(RC))
Opc = AArch64::LDRBui;
@@ -6492,10 +6493,10 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
"Mismatched register size in non subreg COPY");
if (IsSpill)
storeRegToStackSlot(MBB, InsertPt, SrcReg, SrcMO.isKill(), FrameIndex,
- getRegClass(SrcReg), &TRI, Register());
+ getRegClass(SrcReg), Register());
else
loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex,
- getRegClass(DstReg), &TRI, Register());
+ getRegClass(DstReg), Register());
return &*--InsertPt;
}
@@ -6513,8 +6514,7 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
assert(SrcMO.getSubReg() == 0 &&
"Unexpected subreg on physical register");
storeRegToStackSlot(MBB, InsertPt, AArch64::XZR, SrcMO.isKill(),
- FrameIndex, &AArch64::GPR64RegClass, &TRI,
- Register());
+ FrameIndex, &AArch64::GPR64RegClass, Register());
return &*--InsertPt;
}
@@ -6548,7 +6548,7 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
assert(TRI.getRegSizeInBits(*getRegClass(SrcReg)) ==
TRI.getRegSizeInBits(*FillRC) &&
"Mismatched regclass size on folded subreg COPY");
- loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex, FillRC, &TRI,
+ loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex, FillRC,
Register());
MachineInstr &LoadMI = *--InsertPt;
MachineOperand &LoadDst = LoadMI.getOperand(0);
@@ -11063,8 +11063,6 @@ static Register cloneInstr(const MachineInstr *MI, unsigned ReplaceOprNum,
MachineBasicBlock::iterator InsertTo) {
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
const TargetInstrInfo *TII = MBB.getParent()->getSubtarget().getInstrInfo();
- const TargetRegisterInfo *TRI =
- MBB.getParent()->getSubtarget().getRegisterInfo();
MachineInstr *NewMI = MBB.getParent()->CloneMachineInstr(MI);
Register Result = 0;
for (unsigned I = 0; I < NewMI->getNumOperands(); ++I) {
@@ -11073,8 +11071,7 @@ static Register cloneInstr(const MachineInstr *MI, unsigned ReplaceOprNum,
MRI.getRegClass(NewMI->getOperand(0).getReg()));
NewMI->getOperand(I).setReg(Result);
} else if (I == ReplaceOprNum) {
- MRI.constrainRegClass(ReplaceReg,
- TII->getRegClass(NewMI->getDesc(), I, TRI));
+ MRI.constrainRegClass(ReplaceReg, TII->getRegClass(NewMI->getDesc(), I));
NewMI->getOperand(I).setReg(ReplaceReg);
}
}
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index 179574a..979c9ac 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -353,14 +353,13 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
// This tells target independent code that it is okay to pass instructions
diff --git a/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp b/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
index 04e76c7..d25db89 100644
--- a/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
+++ b/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
@@ -595,17 +595,17 @@ bool AArch64MIPeepholeOpt::splitTwoPartImm(
// Determine register classes for destinations and register operands
const TargetRegisterClass *FirstInstrDstRC =
- TII->getRegClass(TII->get(Opcode.first), 0, TRI);
+ TII->getRegClass(TII->get(Opcode.first), 0);
const TargetRegisterClass *FirstInstrOperandRC =
- TII->getRegClass(TII->get(Opcode.first), 1, TRI);
+ TII->getRegClass(TII->get(Opcode.first), 1);
const TargetRegisterClass *SecondInstrDstRC =
(Opcode.first == Opcode.second)
? FirstInstrDstRC
- : TII->getRegClass(TII->get(Opcode.second), 0, TRI);
+ : TII->getRegClass(TII->get(Opcode.second), 0);
const TargetRegisterClass *SecondInstrOperandRC =
(Opcode.first == Opcode.second)
? FirstInstrOperandRC
- : TII->getRegClass(TII->get(Opcode.second), 1, TRI);
+ : TII->getRegClass(TII->get(Opcode.second), 1);
// Get old registers destinations and new register destinations
Register DstReg = MI.getOperand(0).getReg();
@@ -784,14 +784,14 @@ bool AArch64MIPeepholeOpt::visitUBFMXri(MachineInstr &MI) {
}
const TargetRegisterClass *DstRC64 =
- TII->getRegClass(TII->get(MI.getOpcode()), 0, TRI);
+ TII->getRegClass(TII->get(MI.getOpcode()), 0);
const TargetRegisterClass *DstRC32 =
TRI->getSubRegisterClass(DstRC64, AArch64::sub_32);
assert(DstRC32 && "Destination register class of UBFMXri doesn't have a "
"sub_32 subregister class");
const TargetRegisterClass *SrcRC64 =
- TII->getRegClass(TII->get(MI.getOpcode()), 1, TRI);
+ TII->getRegClass(TII->get(MI.getOpcode()), 1);
const TargetRegisterClass *SrcRC32 =
TRI->getSubRegisterClass(SrcRC64, AArch64::sub_32);
assert(SrcRC32 && "Source register class of UBFMXri doesn't have a sub_32 "
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
index eaf8723..f3cf222 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -897,7 +897,7 @@ AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
const MCInstrDesc &MCID = TII->get(AArch64::ADDXri);
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
Register BaseReg = MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
- MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this));
+ MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0));
unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0);
BuildMI(*MBB, Ins, DL, MCID, BaseReg)
diff --git a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
index a67bd42..d87bb52 100644
--- a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
+++ b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
@@ -46,7 +46,6 @@
#include "llvm/Transforms/Utils/MemoryTaggingSupport.h"
#include <cassert>
#include <memory>
-#include <utility>
using namespace llvm;
diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index 6273cfc..f5dfbdc 100644
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -88,7 +88,7 @@ private:
StringRef Mnemonic; ///< Instruction mnemonic.
// Map of register aliases registers via the .req directive.
- StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
+ StringMap<std::pair<RegKind, MCRegister>> RegisterReqs;
class PrefixInfo {
public:
@@ -165,7 +165,7 @@ private:
AArch64CC::CondCode parseCondCodeString(StringRef Cond,
std::string &Suggestion);
bool parseCondCode(OperandVector &Operands, bool invertCondCode);
- unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
+ MCRegister matchRegisterNameAlias(StringRef Name, RegKind Kind);
bool parseRegister(OperandVector &Operands);
bool parseSymbolicImmVal(const MCExpr *&ImmVal);
bool parseNeonVectorList(OperandVector &Operands);
@@ -391,7 +391,7 @@ private:
};
struct RegOp {
- unsigned RegNum;
+ MCRegister Reg;
RegKind Kind;
int ElementWidth;
@@ -417,7 +417,7 @@ private:
};
struct MatrixRegOp {
- unsigned RegNum;
+ MCRegister Reg;
unsigned ElementWidth;
MatrixKind Kind;
};
@@ -427,7 +427,7 @@ private:
};
struct VectorListOp {
- unsigned RegNum;
+ MCRegister Reg;
unsigned Count;
unsigned Stride;
unsigned NumElements;
@@ -688,12 +688,12 @@ public:
MCRegister getReg() const override {
assert(Kind == k_Register && "Invalid access!");
- return Reg.RegNum;
+ return Reg.Reg;
}
- unsigned getMatrixReg() const {
+ MCRegister getMatrixReg() const {
assert(Kind == k_MatrixRegister && "Invalid access!");
- return MatrixReg.RegNum;
+ return MatrixReg.Reg;
}
unsigned getMatrixElementWidth() const {
@@ -716,9 +716,9 @@ public:
return Reg.EqualityTy;
}
- unsigned getVectorListStart() const {
+ MCRegister getVectorListStart() const {
assert(Kind == k_VectorList && "Invalid access!");
- return VectorList.RegNum;
+ return VectorList.Reg;
}
unsigned getVectorListCount() const {
@@ -1264,15 +1264,15 @@ public:
bool isNeonVectorRegLo() const {
return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
(AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
- Reg.RegNum) ||
+ Reg.Reg) ||
AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
- Reg.RegNum));
+ Reg.Reg));
}
bool isNeonVectorReg0to7() const {
return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
(AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
- Reg.RegNum));
+ Reg.Reg));
}
bool isMatrix() const { return Kind == k_MatrixRegister; }
@@ -1401,34 +1401,34 @@ public:
bool isGPR32as64() const {
return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
- AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
+ AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.Reg);
}
bool isGPR64as32() const {
return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
- AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
+ AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.Reg);
}
bool isGPR64x8() const {
return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
- Reg.RegNum);
+ Reg.Reg);
}
bool isWSeqPair() const {
return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
- Reg.RegNum);
+ Reg.Reg);
}
bool isXSeqPair() const {
return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
- Reg.RegNum);
+ Reg.Reg);
}
bool isSyspXzrPair() const {
- return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
+ return isGPR64<AArch64::GPR64RegClassID>() && Reg.Reg == AArch64::XZR;
}
template<int64_t Angle, int64_t Remainder>
@@ -1495,7 +1495,7 @@ public:
isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
if (!Res)
return DiagnosticPredicate::NoMatch;
- if (!AArch64MCRegisterClasses[RegClass].contains(VectorList.RegNum))
+ if (!AArch64MCRegisterClasses[RegClass].contains(VectorList.Reg))
return DiagnosticPredicate::NearMatch;
return DiagnosticPredicate::Match;
}
@@ -1507,9 +1507,9 @@ public:
ElementWidth, Stride>();
if (!Res)
return DiagnosticPredicate::NoMatch;
- if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
- ((VectorList.RegNum >= AArch64::Z16) &&
- (VectorList.RegNum < (AArch64::Z16 + Stride))))
+ if ((VectorList.Reg < (AArch64::Z0 + Stride)) ||
+ ((VectorList.Reg >= AArch64::Z16) &&
+ (VectorList.Reg < (AArch64::Z16 + Stride))))
return DiagnosticPredicate::Match;
return DiagnosticPredicate::NoMatch;
}
@@ -1841,7 +1841,7 @@ public:
void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- unsigned Reg = getReg();
+ MCRegister Reg = getReg();
// Normalise to PPR
if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
Reg = Reg - AArch64::PN0 + AArch64::P0;
@@ -2336,13 +2336,12 @@ public:
}
static std::unique_ptr<AArch64Operand>
- CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
+ CreateReg(MCRegister Reg, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
- unsigned ShiftAmount = 0,
- unsigned HasExplicitAmount = false) {
+ unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
- Op->Reg.RegNum = RegNum;
+ Op->Reg.Reg = Reg;
Op->Reg.Kind = Kind;
Op->Reg.ElementWidth = 0;
Op->Reg.EqualityTy = EqTy;
@@ -2354,28 +2353,26 @@ public:
return Op;
}
- static std::unique_ptr<AArch64Operand>
- CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
- SMLoc S, SMLoc E, MCContext &Ctx,
- AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
- unsigned ShiftAmount = 0,
- unsigned HasExplicitAmount = false) {
+ static std::unique_ptr<AArch64Operand> CreateVectorReg(
+ MCRegister Reg, RegKind Kind, unsigned ElementWidth, SMLoc S, SMLoc E,
+ MCContext &Ctx, AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
+ unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
Kind == RegKind::SVEPredicateVector ||
Kind == RegKind::SVEPredicateAsCounter) &&
"Invalid vector kind");
- auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
+ auto Op = CreateReg(Reg, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
HasExplicitAmount);
Op->Reg.ElementWidth = ElementWidth;
return Op;
}
static std::unique_ptr<AArch64Operand>
- CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
+ CreateVectorList(MCRegister Reg, unsigned Count, unsigned Stride,
unsigned NumElements, unsigned ElementWidth,
RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
- Op->VectorList.RegNum = RegNum;
+ Op->VectorList.Reg = Reg;
Op->VectorList.Count = Count;
Op->VectorList.Stride = Stride;
Op->VectorList.NumElements = NumElements;
@@ -2586,10 +2583,10 @@ public:
}
static std::unique_ptr<AArch64Operand>
- CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
+ CreateMatrixRegister(MCRegister Reg, unsigned ElementWidth, MatrixKind Kind,
SMLoc S, SMLoc E, MCContext &Ctx) {
auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
- Op->MatrixReg.RegNum = RegNum;
+ Op->MatrixReg.Reg = Reg;
Op->MatrixReg.ElementWidth = ElementWidth;
Op->MatrixReg.Kind = Kind;
Op->StartLoc = S;
@@ -2660,9 +2657,9 @@ void AArch64Operand::print(raw_ostream &OS, const MCAsmInfo &MAI) const {
break;
case k_VectorList: {
OS << "<vectorlist ";
- unsigned Reg = getVectorListStart();
+ MCRegister Reg = getVectorListStart();
for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
- OS << Reg + i * getVectorListStride() << " ";
+ OS << Reg.id() + i * getVectorListStride() << " ";
OS << ">";
break;
}
@@ -2699,7 +2696,7 @@ void AArch64Operand::print(raw_ostream &OS, const MCAsmInfo &MAI) const {
OS << getCMHPriorityHintName();
break;
case k_MatrixRegister:
- OS << "<matrix " << getMatrixReg() << ">";
+ OS << "<matrix " << getMatrixReg().id() << ">";
break;
case k_MatrixTileList: {
OS << "<matrixlist ";
@@ -2715,7 +2712,7 @@ void AArch64Operand::print(raw_ostream &OS, const MCAsmInfo &MAI) const {
break;
}
case k_Register:
- OS << "<register " << getReg() << ">";
+ OS << "<register " << getReg().id() << ">";
if (!getShiftExtendAmount() && !hasShiftExtendAmount())
break;
[[fallthrough]];
@@ -3048,53 +3045,53 @@ ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
}
// Matches a register name or register alias previously defined by '.req'
-unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
- RegKind Kind) {
- unsigned RegNum = 0;
- if ((RegNum = matchSVEDataVectorRegName(Name)))
- return Kind == RegKind::SVEDataVector ? RegNum : 0;
+MCRegister AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
+ RegKind Kind) {
+ MCRegister Reg = MCRegister();
+ if ((Reg = matchSVEDataVectorRegName(Name)))
+ return Kind == RegKind::SVEDataVector ? Reg : MCRegister();
- if ((RegNum = matchSVEPredicateVectorRegName(Name)))
- return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
+ if ((Reg = matchSVEPredicateVectorRegName(Name)))
+ return Kind == RegKind::SVEPredicateVector ? Reg : MCRegister();
- if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))
- return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
+ if ((Reg = matchSVEPredicateAsCounterRegName(Name)))
+ return Kind == RegKind::SVEPredicateAsCounter ? Reg : MCRegister();
- if ((RegNum = MatchNeonVectorRegName(Name)))
- return Kind == RegKind::NeonVector ? RegNum : 0;
+ if ((Reg = MatchNeonVectorRegName(Name)))
+ return Kind == RegKind::NeonVector ? Reg : MCRegister();
- if ((RegNum = matchMatrixRegName(Name)))
- return Kind == RegKind::Matrix ? RegNum : 0;
+ if ((Reg = matchMatrixRegName(Name)))
+ return Kind == RegKind::Matrix ? Reg : MCRegister();
- if (Name.equals_insensitive("zt0"))
+ if (Name.equals_insensitive("zt0"))
return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;
// The parsed register must be of RegKind Scalar
- if ((RegNum = MatchRegisterName(Name)))
- return (Kind == RegKind::Scalar) ? RegNum : 0;
+ if ((Reg = MatchRegisterName(Name)))
+ return (Kind == RegKind::Scalar) ? Reg : MCRegister();
- if (!RegNum) {
+ if (!Reg) {
// Handle a few common aliases of registers.
- if (auto RegNum = StringSwitch<unsigned>(Name.lower())
- .Case("fp", AArch64::FP)
- .Case("lr", AArch64::LR)
- .Case("x31", AArch64::XZR)
- .Case("w31", AArch64::WZR)
- .Default(0))
- return Kind == RegKind::Scalar ? RegNum : 0;
+ if (MCRegister Reg = StringSwitch<unsigned>(Name.lower())
+ .Case("fp", AArch64::FP)
+ .Case("lr", AArch64::LR)
+ .Case("x31", AArch64::XZR)
+ .Case("w31", AArch64::WZR)
+ .Default(0))
+ return Kind == RegKind::Scalar ? Reg : MCRegister();
// Check for aliases registered via .req. Canonicalize to lower case.
// That's more consistent since register names are case insensitive, and
// it's how the original entry was passed in from MC/MCParser/AsmParser.
auto Entry = RegisterReqs.find(Name.lower());
if (Entry == RegisterReqs.end())
- return 0;
+ return MCRegister();
- // set RegNum if the match is the right kind of register
+ // set Reg if the match is the right kind of register
if (Kind == Entry->getValue().first)
- RegNum = Entry->getValue().second;
+ Reg = Entry->getValue().second;
}
- return RegNum;
+ return Reg;
}
unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
@@ -3122,8 +3119,8 @@ ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
return ParseStatus::NoMatch;
std::string lowerCase = Tok.getString().lower();
- unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
- if (Reg == 0)
+ MCRegister Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
+ if (!Reg)
return ParseStatus::NoMatch;
RegNum = Reg;
@@ -3667,7 +3664,7 @@ ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
}
// Try to parse matrix register.
- unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
+ MCRegister Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
if (!Reg)
return ParseStatus::NoMatch;
@@ -4130,12 +4127,12 @@ bool AArch64AsmParser::parseSyslAlias(StringRef Name, SMLoc NameLoc,
SMLoc startLoc = getLoc();
const AsmToken &regTok = getTok();
StringRef reg = regTok.getString();
- unsigned RegNum = matchRegisterNameAlias(reg.lower(), RegKind::Scalar);
- if (!RegNum)
+ MCRegister Reg = matchRegisterNameAlias(reg.lower(), RegKind::Scalar);
+ if (!Reg)
return TokError("expected register operand");
Operands.push_back(AArch64Operand::CreateReg(
- RegNum, RegKind::Scalar, startLoc, getLoc(), getContext(), EqualsReg));
+ Reg, RegKind::Scalar, startLoc, getLoc(), getContext(), EqualsReg));
Lex(); // Eat token
if (parseToken(AsmToken::Comma))
@@ -4453,7 +4450,7 @@ ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
// a '.'.
size_t Start = 0, Next = Name.find('.');
StringRef Head = Name.slice(Start, Next);
- unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
+ MCRegister RegNum = matchRegisterNameAlias(Head, MatchKind);
if (RegNum) {
if (Next != StringRef::npos) {
@@ -4937,13 +4934,13 @@ ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
const AsmToken &Tok = getTok();
std::string Name = Tok.getString().lower();
- unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);
+ MCRegister Reg = matchRegisterNameAlias(Name, RegKind::LookupTable);
- if (RegNum == 0)
+ if (!Reg)
return ParseStatus::NoMatch;
Operands.push_back(AArch64Operand::CreateReg(
- RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
+ Reg, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
Lex(); // Eat register.
// Check if register is followed by an index
@@ -7651,7 +7648,7 @@ bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
if (parseEOL())
return true;
- auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
+ auto pair = std::make_pair(RegisterKind, RegNum);
if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
Warning(L, "ignoring redefinition of register alias '" + Name + "'");
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index 54d94b1..0b61adf 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -2069,6 +2069,7 @@ def FeatureISAVersion12 : FeatureSet<
FeatureMemoryAtomicFAddF32DenormalSupport,
FeatureBVHDualAndBVH8Insts,
FeatureWaitsBeforeSystemScopeStores,
+ FeatureD16Writes32BitVgpr
]>;
def FeatureISAVersion12_50 : FeatureSet<
@@ -2143,6 +2144,7 @@ def FeatureISAVersion12_50 : FeatureSet<
FeatureSupportsXNACK,
FeatureXNACK,
FeatureClusters,
+ FeatureD16Writes32BitVgpr,
]>;
def FeatureISAVersion12_51 : FeatureSet<
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.h
index dad94b8..8838a94 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.h
@@ -52,7 +52,7 @@ public:
}
static ArgDescriptor createArg(const ArgDescriptor &Arg, unsigned Mask) {
- return ArgDescriptor(Arg.Reg, Mask, Arg.IsStack, Arg.IsSet);
+ return ArgDescriptor(Arg.Reg.id(), Mask, Arg.IsStack, Arg.IsSet);
}
bool isSet() const {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
index dc8fa7f..1765d05 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
@@ -873,6 +873,7 @@ LLT RegBankLegalizeHelper::getTyFromID(RegBankLLTMappingApplyID ID) {
case Sgpr128:
case Vgpr128:
return LLT::scalar(128);
+ case SgprP0:
case VgprP0:
return LLT::pointer(0, 64);
case SgprP1:
@@ -887,6 +888,8 @@ LLT RegBankLegalizeHelper::getTyFromID(RegBankLLTMappingApplyID ID) {
case SgprP5:
case VgprP5:
return LLT::pointer(5, 32);
+ case SgprP8:
+ return LLT::pointer(8, 128);
case SgprV2S16:
case VgprV2S16:
case UniInVgprV2S16:
@@ -972,10 +975,12 @@ RegBankLegalizeHelper::getRegBankFromID(RegBankLLTMappingApplyID ID) {
case Sgpr32_WF:
case Sgpr64:
case Sgpr128:
+ case SgprP0:
case SgprP1:
case SgprP3:
case SgprP4:
case SgprP5:
+ case SgprP8:
case SgprPtr32:
case SgprPtr64:
case SgprPtr128:
@@ -1055,10 +1060,12 @@ void RegBankLegalizeHelper::applyMappingDst(
case Sgpr32:
case Sgpr64:
case Sgpr128:
+ case SgprP0:
case SgprP1:
case SgprP3:
case SgprP4:
case SgprP5:
+ case SgprP8:
case SgprV2S16:
case SgprV2S32:
case SgprV4S32:
@@ -1198,10 +1205,12 @@ void RegBankLegalizeHelper::applyMappingSrc(
case Sgpr32:
case Sgpr64:
case Sgpr128:
+ case SgprP0:
case SgprP1:
case SgprP3:
case SgprP4:
case SgprP5:
+ case SgprP8:
case SgprV2S16:
case SgprV2S32:
case SgprV4S32: {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
index 1e5885a2..615b911 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
@@ -66,6 +66,8 @@ bool matchUniformityAndLLT(Register Reg, UniformityLLTOpPredicateID UniID,
return MRI.getType(Reg) == LLT::pointer(4, 64);
case P5:
return MRI.getType(Reg) == LLT::pointer(5, 32);
+ case P8:
+ return MRI.getType(Reg) == LLT::pointer(8, 128);
case Ptr32:
return isAnyPtr(MRI.getType(Reg), 32);
case Ptr64:
@@ -108,6 +110,8 @@ bool matchUniformityAndLLT(Register Reg, UniformityLLTOpPredicateID UniID,
return MRI.getType(Reg) == LLT::pointer(4, 64) && MUI.isUniform(Reg);
case UniP5:
return MRI.getType(Reg) == LLT::pointer(5, 32) && MUI.isUniform(Reg);
+ case UniP8:
+ return MRI.getType(Reg) == LLT::pointer(8, 128) && MUI.isUniform(Reg);
case UniPtr32:
return isAnyPtr(MRI.getType(Reg), 32) && MUI.isUniform(Reg);
case UniPtr64:
@@ -918,6 +922,15 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST,
addRulesForGOpcs({G_READSTEADYCOUNTER, G_READCYCLECOUNTER}, Standard)
.Uni(S64, {{Sgpr64}, {}});
+ addRulesForGOpcs({G_BLOCK_ADDR}).Any({{UniP0}, {{SgprP0}, {}}});
+
+ addRulesForGOpcs({G_GLOBAL_VALUE})
+ .Any({{UniP0}, {{SgprP0}, {}}})
+ .Any({{UniP1}, {{SgprP1}, {}}})
+ .Any({{UniP3}, {{SgprP3}, {}}})
+ .Any({{UniP4}, {{SgprP4}, {}}})
+ .Any({{UniP8}, {{SgprP8}, {}}});
+
bool hasSALUFloat = ST->hasSALUFloatInsts();
addRulesForGOpcs({G_FADD}, Standard)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h
index e6df5d8..7e4ce7b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h
@@ -63,6 +63,7 @@ enum UniformityLLTOpPredicateID {
P3,
P4,
P5,
+ P8,
Ptr32,
Ptr64,
Ptr128,
@@ -72,6 +73,7 @@ enum UniformityLLTOpPredicateID {
UniP3,
UniP4,
UniP5,
+ UniP8,
UniPtr32,
UniPtr64,
UniPtr128,
@@ -136,10 +138,12 @@ enum RegBankLLTMappingApplyID {
Sgpr32,
Sgpr64,
Sgpr128,
+ SgprP0,
SgprP1,
SgprP3,
SgprP4,
SgprP5,
+ SgprP8,
SgprPtr32,
SgprPtr64,
SgprPtr128,
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index 09338c5..5e0486a 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -1865,7 +1865,7 @@ private:
unsigned getConstantBusLimit(unsigned Opcode) const;
bool usesConstantBus(const MCInst &Inst, unsigned OpIdx);
bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const;
- unsigned findImplicitSGPRReadInVOP(const MCInst &Inst) const;
+ MCRegister findImplicitSGPRReadInVOP(const MCInst &Inst) const;
bool isSupportedMnemo(StringRef Mnemo,
const FeatureBitset &FBS);
@@ -3665,7 +3665,8 @@ StringRef AMDGPUAsmParser::getMatchedVariantName() const {
return "";
}
-unsigned AMDGPUAsmParser::findImplicitSGPRReadInVOP(const MCInst &Inst) const {
+MCRegister
+AMDGPUAsmParser::findImplicitSGPRReadInVOP(const MCInst &Inst) const {
const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
for (MCPhysReg Reg : Desc.implicit_uses()) {
switch (Reg) {
@@ -3679,7 +3680,7 @@ unsigned AMDGPUAsmParser::findImplicitSGPRReadInVOP(const MCInst &Inst) const {
break;
}
}
- return AMDGPU::NoRegister;
+ return MCRegister();
}
// NB: This code is correct only when used to check constant
@@ -3854,9 +3855,9 @@ bool AMDGPUAsmParser::validateConstantBusLimitations(
LiteralSize = 4;
}
- SmallDenseSet<unsigned> SGPRsUsed;
- unsigned SGPRUsed = findImplicitSGPRReadInVOP(Inst);
- if (SGPRUsed != AMDGPU::NoRegister) {
+ SmallDenseSet<MCRegister> SGPRsUsed;
+ MCRegister SGPRUsed = findImplicitSGPRReadInVOP(Inst);
+ if (SGPRUsed) {
SGPRsUsed.insert(SGPRUsed);
++ConstantBusUseCount;
}
diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
index e3f3aba..dd3120f 100644
--- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
+++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
@@ -1199,8 +1199,8 @@ void AMDGPUDisassembler::convertVOP3DPPInst(MCInst &MI) const {
// Given a wide tuple \p Reg check if it will overflow 256 registers.
// \returns \p Reg on success or NoRegister otherwise.
-static unsigned CheckVGPROverflow(unsigned Reg, const MCRegisterClass &RC,
- const MCRegisterInfo &MRI) {
+static MCRegister CheckVGPROverflow(MCRegister Reg, const MCRegisterClass &RC,
+ const MCRegisterInfo &MRI) {
unsigned NumRegs = RC.getSizeInBits() / 32;
MCRegister Sub0 = MRI.getSubReg(Reg, AMDGPU::sub0);
if (!Sub0)
@@ -1214,7 +1214,7 @@ static unsigned CheckVGPROverflow(unsigned Reg, const MCRegisterClass &RC,
assert(BaseReg && "Only vector registers expected");
- return (Sub0 - BaseReg + NumRegs <= 256) ? Reg : AMDGPU::NoRegister;
+ return (Sub0 - BaseReg + NumRegs <= 256) ? Reg : MCRegister();
}
// Note that before gfx10, the MIMG encoding provided no information about
@@ -1456,9 +1456,8 @@ MCOperand AMDGPUDisassembler::errOperand(unsigned V,
return MCOperand();
}
-inline
-MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
- return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
+inline MCOperand AMDGPUDisassembler::createRegOperand(MCRegister Reg) const {
+ return MCOperand::createReg(AMDGPU::getMCReg(Reg, STI));
}
inline
diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
index d103d79..ab130db 100644
--- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
+++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
@@ -69,7 +69,7 @@ public:
const char* getRegClassName(unsigned RegClassID) const;
- MCOperand createRegOperand(unsigned int RegId) const;
+ MCOperand createRegOperand(MCRegister Reg) const;
MCOperand createRegOperand(unsigned RegClassID, unsigned Val) const;
MCOperand createSRegOperand(unsigned SRegClassID, unsigned Val) const;
MCOperand createVGPR16Operand(unsigned RegIdx, bool IsHi) const;
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index 9fbf9e5..23ba4ad 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -2011,7 +2011,7 @@ void PreRARematStage::rematerialize() {
// Rematerialize DefMI to its use block.
TII->reMaterialize(*InsertPos->getParent(), InsertPos, Reg,
- AMDGPU::NoSubRegister, *DefMI, *DAG.TRI);
+ AMDGPU::NoSubRegister, *DefMI);
Remat.RematMI = &*std::prev(InsertPos);
DAG.LIS->InsertMachineInstrInMaps(*Remat.RematMI);
@@ -2163,8 +2163,7 @@ void PreRARematStage::finalizeGCNSchedStage() {
// Re-rematerialize MI at the end of its original region. Note that it may
// not be rematerialized exactly in the same position as originally within
// the region, but it should not matter much.
- TII->reMaterialize(*MBB, InsertPos, Reg, AMDGPU::NoSubRegister, RematMI,
- *DAG.TRI);
+ TII->reMaterialize(*MBB, InsertPos, Reg, AMDGPU::NoSubRegister, RematMI);
MachineInstr *NewMI = &*std::prev(InsertPos);
DAG.LIS->InsertMachineInstrInMaps(*NewMI);
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
index 703ec0a..207c1da 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
@@ -336,7 +336,7 @@ void AMDGPUInstPrinter::printSymbolicFormat(const MCInst *MI,
// \returns a low 256 vgpr representing a high vgpr \p Reg [v256..v1023] or
// \p Reg itself otherwise.
-static MCPhysReg getRegForPrinting(MCPhysReg Reg, const MCRegisterInfo &MRI) {
+static MCRegister getRegForPrinting(MCRegister Reg, const MCRegisterInfo &MRI) {
unsigned Enc = MRI.getEncodingValue(Reg);
unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
if (Idx < 0x100)
@@ -355,10 +355,10 @@ static MCPhysReg getRegForPrinting(MCPhysReg Reg, const MCRegisterInfo &MRI) {
}
// Restore MSBs of a VGPR above 255 from the MCInstrAnalysis.
-static MCPhysReg getRegFromMIA(MCPhysReg Reg, unsigned OpNo,
- const MCInstrDesc &Desc,
- const MCRegisterInfo &MRI,
- const AMDGPUMCInstrAnalysis &MIA) {
+static MCRegister getRegFromMIA(MCRegister Reg, unsigned OpNo,
+ const MCInstrDesc &Desc,
+ const MCRegisterInfo &MRI,
+ const AMDGPUMCInstrAnalysis &MIA) {
unsigned VgprMSBs = MIA.getVgprMSBs();
if (!VgprMSBs)
return Reg;
@@ -403,10 +403,10 @@ void AMDGPUInstPrinter::printRegOperand(MCRegister Reg, raw_ostream &O,
}
#endif
- unsigned PrintReg = getRegForPrinting(Reg, MRI);
+ MCRegister PrintReg = getRegForPrinting(Reg, MRI);
O << getRegisterName(PrintReg);
- if (PrintReg != Reg.id())
+ if (PrintReg != Reg)
O << " /*" << getRegisterName(Reg) << "*/";
}
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 964309b..293005c 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -713,7 +713,7 @@ bool SIFoldOperandsImpl::updateOperand(FoldCandidate &Fold) const {
// Verify the register is compatible with the operand.
if (const TargetRegisterClass *OpRC =
- TII->getRegClass(MI->getDesc(), Fold.UseOpNo, TRI)) {
+ TII->getRegClass(MI->getDesc(), Fold.UseOpNo)) {
const TargetRegisterClass *NewRC =
TRI->getRegClassForReg(*MRI, New->getReg());
@@ -2394,7 +2394,7 @@ bool SIFoldOperandsImpl::tryFoldRegSequence(MachineInstr &MI) {
unsigned OpIdx = Op - &UseMI->getOperand(0);
const MCInstrDesc &InstDesc = UseMI->getDesc();
- const TargetRegisterClass *OpRC = TII->getRegClass(InstDesc, OpIdx, TRI);
+ const TargetRegisterClass *OpRC = TII->getRegClass(InstDesc, OpIdx);
if (!OpRC || !TRI->isVectorSuperClass(OpRC))
return false;
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 8bb2808..768c0ab 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -17367,12 +17367,14 @@ void SITargetLowering::AddMemOpInit(MachineInstr &MI) const {
// Abandon attempt if the dst size isn't large enough
// - this is in fact an error but this is picked up elsewhere and
// reported correctly.
- uint32_t DstSize =
- TRI.getRegSizeInBits(*TII->getOpRegClass(MI, DstIdx)) / 32;
+ const TargetRegisterClass *DstRC = TII->getRegClass(MI.getDesc(), DstIdx);
+
+ uint32_t DstSize = TRI.getRegSizeInBits(*DstRC) / 32;
if (DstSize < InitIdx)
return;
} else if (TII->isMUBUF(MI) && AMDGPU::getMUBUFTfe(MI.getOpcode())) {
- InitIdx = TRI.getRegSizeInBits(*TII->getOpRegClass(MI, DstIdx)) / 32;
+ const TargetRegisterClass *DstRC = TII->getRegClass(MI.getDesc(), DstIdx);
+ InitIdx = TRI.getRegSizeInBits(*DstRC) / 32;
} else {
return;
}
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 6b397e0..e5f0e3e 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1668,8 +1668,7 @@ unsigned SIInstrInfo::getVectorRegSpillSaveOpcode(
void SIInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags) const {
MachineFunction *MF = MBB.getParent();
SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
@@ -1681,7 +1680,7 @@ void SIInstrInfo::storeRegToStackSlot(
MachineMemOperand *MMO = MF->getMachineMemOperand(
PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FrameIndex),
FrameInfo.getObjectAlign(FrameIndex));
- unsigned SpillSize = TRI->getSpillSize(*RC);
+ unsigned SpillSize = RI.getSpillSize(*RC);
MachineRegisterInfo &MRI = MF->getRegInfo();
if (RI.isSGPRClass(RC)) {
@@ -1863,14 +1862,13 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
Register DestReg, int FrameIndex,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
MachineFunction *MF = MBB.getParent();
SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
MachineFrameInfo &FrameInfo = MF->getFrameInfo();
const DebugLoc &DL = MBB.findDebugLoc(MI);
- unsigned SpillSize = TRI->getSpillSize(*RC);
+ unsigned SpillSize = RI.getSpillSize(*RC);
MachinePointerInfo PtrInfo
= MachinePointerInfo::getFixedStack(*MF, FrameIndex);
@@ -2519,8 +2517,8 @@ bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
void SIInstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, Register DestReg,
- unsigned SubIdx, const MachineInstr &Orig,
- const TargetRegisterInfo &RI) const {
+ unsigned SubIdx,
+ const MachineInstr &Orig) const {
// Try shrinking the instruction to remat only the part needed for current
// context.
@@ -2570,7 +2568,7 @@ void SIInstrInfo::reMaterialize(MachineBasicBlock &MBB,
const MCInstrDesc &TID = get(NewOpcode);
const TargetRegisterClass *NewRC =
- RI.getAllocatableClass(getRegClass(TID, 0, &RI));
+ RI.getAllocatableClass(getRegClass(TID, 0));
MRI.setRegClass(DestReg, NewRC);
UseMO->setReg(DestReg);
@@ -2600,7 +2598,7 @@ void SIInstrInfo::reMaterialize(MachineBasicBlock &MBB,
break;
}
- TargetInstrInfo::reMaterialize(MBB, I, DestReg, SubIdx, Orig, RI);
+ TargetInstrInfo::reMaterialize(MBB, I, DestReg, SubIdx, Orig);
}
std::pair<MachineInstr*, MachineInstr*>
@@ -3613,7 +3611,7 @@ bool SIInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
AMDGPU::V_MOV_B64_PSEUDO, AMDGPU::V_ACCVGPR_WRITE_B32_e64}) {
const MCInstrDesc &MovDesc = get(MovOp);
- const TargetRegisterClass *MovDstRC = getRegClass(MovDesc, 0, &RI);
+ const TargetRegisterClass *MovDstRC = getRegClass(MovDesc, 0);
if (Is16Bit) {
// We just need to find a correctly sized register class, so the
// subregister index compatibility doesn't matter since we're statically
@@ -6028,9 +6026,8 @@ SIInstrInfo::getWholeWaveFunctionSetup(MachineFunction &MF) const {
// FIXME: This should not be an overridable function. All subtarget dependent
// operand modifications should go through isLookupRegClassByHwMode in the
// generic handling.
-const TargetRegisterClass *
-SIInstrInfo::getRegClass(const MCInstrDesc &TID, unsigned OpNum,
- const TargetRegisterInfo *TRI) const {
+const TargetRegisterClass *SIInstrInfo::getRegClass(const MCInstrDesc &TID,
+ unsigned OpNum) const {
if (OpNum >= TID.getNumOperands())
return nullptr;
const MCOperandInfo &OpInfo = TID.operands()[OpNum];
@@ -6805,7 +6802,7 @@ void SIInstrInfo::legalizeOperandsFLAT(MachineRegisterInfo &MRI,
return;
const TargetRegisterClass *DeclaredRC =
- getRegClass(MI.getDesc(), SAddr->getOperandNo(), &RI);
+ getRegClass(MI.getDesc(), SAddr->getOperandNo());
Register ToSGPR = readlaneVGPRToSGPR(SAddr->getReg(), MI, MRI, DeclaredRC);
SAddr->setReg(ToSGPR);
@@ -7636,6 +7633,8 @@ void SIInstrInfo::moveToVALUImpl(SIInstrWorklist &Worklist,
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
unsigned Opcode = Inst.getOpcode();
unsigned NewOpcode = getVALUOp(Inst);
+ const DebugLoc &DL = Inst.getDebugLoc();
+
// Handle some special cases
switch (Opcode) {
default:
@@ -7873,7 +7872,6 @@ void SIInstrInfo::moveToVALUImpl(SIInstrWorklist &Worklist,
return;
case AMDGPU::S_UADDO_PSEUDO:
case AMDGPU::S_USUBO_PSEUDO: {
- const DebugLoc &DL = Inst.getDebugLoc();
MachineOperand &Dest0 = Inst.getOperand(0);
MachineOperand &Dest1 = Inst.getOperand(1);
MachineOperand &Src0 = Inst.getOperand(2);
@@ -7893,12 +7891,37 @@ void SIInstrInfo::moveToVALUImpl(SIInstrWorklist &Worklist,
legalizeOperands(*NewInstr, MDT);
MRI.replaceRegWith(Dest0.getReg(), DestReg);
- addUsersToMoveToVALUWorklist(NewInstr->getOperand(0).getReg(), MRI,
- Worklist);
+ addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist);
Inst.eraseFromParent();
}
return;
+ case AMDGPU::S_LSHL1_ADD_U32:
+ case AMDGPU::S_LSHL2_ADD_U32:
+ case AMDGPU::S_LSHL3_ADD_U32:
+ case AMDGPU::S_LSHL4_ADD_U32: {
+ MachineOperand &Dest = Inst.getOperand(0);
+ MachineOperand &Src0 = Inst.getOperand(1);
+ MachineOperand &Src1 = Inst.getOperand(2);
+ unsigned ShiftAmt = (Opcode == AMDGPU::S_LSHL1_ADD_U32 ? 1
+ : Opcode == AMDGPU::S_LSHL2_ADD_U32 ? 2
+ : Opcode == AMDGPU::S_LSHL3_ADD_U32 ? 3
+ : 4);
+ const TargetRegisterClass *NewRC =
+ RI.getEquivalentVGPRClass(MRI.getRegClass(Dest.getReg()));
+ Register DestReg = MRI.createVirtualRegister(NewRC);
+ MachineInstr *NewInstr =
+ BuildMI(*MBB, &Inst, DL, get(AMDGPU::V_LSHL_ADD_U32_e64), DestReg)
+ .add(Src0)
+ .addImm(ShiftAmt)
+ .add(Src1);
+
+ legalizeOperands(*NewInstr, MDT);
+ MRI.replaceRegWith(Dest.getReg(), DestReg);
+ addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist);
+ Inst.eraseFromParent();
+ }
+ return;
case AMDGPU::S_CSELECT_B32:
case AMDGPU::S_CSELECT_B64:
lowerSelect(Worklist, Inst, MDT);
@@ -7995,7 +8018,6 @@ void SIInstrInfo::moveToVALUImpl(SIInstrWorklist &Worklist,
return;
}
case AMDGPU::S_CVT_HI_F32_F16: {
- const DebugLoc &DL = Inst.getDebugLoc();
Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
Register NewDst = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
if (ST.useRealTrue16Insts()) {
@@ -8025,7 +8047,6 @@ void SIInstrInfo::moveToVALUImpl(SIInstrWorklist &Worklist,
}
case AMDGPU::S_MINIMUM_F32:
case AMDGPU::S_MAXIMUM_F32: {
- const DebugLoc &DL = Inst.getDebugLoc();
Register NewDst = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
MachineInstr *NewInstr = BuildMI(*MBB, Inst, DL, get(NewOpcode), NewDst)
.addImm(0) // src0_modifiers
@@ -8043,7 +8064,6 @@ void SIInstrInfo::moveToVALUImpl(SIInstrWorklist &Worklist,
}
case AMDGPU::S_MINIMUM_F16:
case AMDGPU::S_MAXIMUM_F16: {
- const DebugLoc &DL = Inst.getDebugLoc();
Register NewDst = MRI.createVirtualRegister(ST.useRealTrue16Insts()
? &AMDGPU::VGPR_16RegClass
: &AMDGPU::VGPR_32RegClass);
@@ -8067,7 +8087,6 @@ void SIInstrInfo::moveToVALUImpl(SIInstrWorklist &Worklist,
case AMDGPU::V_S_RCP_F16_e64:
case AMDGPU::V_S_RSQ_F16_e64:
case AMDGPU::V_S_SQRT_F16_e64: {
- const DebugLoc &DL = Inst.getDebugLoc();
Register NewDst = MRI.createVirtualRegister(ST.useRealTrue16Insts()
? &AMDGPU::VGPR_16RegClass
: &AMDGPU::VGPR_32RegClass);
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 8d693b1..c048b85 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -307,22 +307,19 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
Register DestReg, unsigned SubIdx,
- const MachineInstr &Orig,
- const TargetRegisterInfo &TRI) const override;
+ const MachineInstr &Orig) const override;
// Splits a V_MOV_B64_DPP_PSEUDO opcode into a pair of v_mov_b32_dpp
// instructions. Returns a pair of generated instructions.
@@ -1622,9 +1619,8 @@ public:
/// Return true if this opcode should not be used by codegen.
bool isAsmOnlyOpcode(int MCOp) const;
- const TargetRegisterClass *
- getRegClass(const MCInstrDesc &TID, unsigned OpNum,
- const TargetRegisterInfo *TRI) const override;
+ const TargetRegisterClass *getRegClass(const MCInstrDesc &TID,
+ unsigned OpNum) const override;
void fixImplicitOperands(MachineInstr &MI) const;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index b7f63ec..0bde5d3 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -776,11 +776,7 @@ def xnor : PatFrag <
foreach I = 1-4 in {
def shl#I#_add : PatFrag <
(ops node:$src0, node:$src1),
- (add (shl_oneuse $src0, (i32 I)), $src1)> {
- // FIXME: Poor substitute for disabling pattern in SelectionDAG
- let PredicateCode = [{return false;}];
- let GISelPredicateCode = [{return true;}];
-}
+ (add (shl_oneuse $src0, (i32 I)), $src1)>;
}
multiclass SIAtomicM0Glue2 <string op_name, bit is_amdgpu = 0,
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 6f1feb1..6dd4b1d 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -791,6 +791,17 @@ def : GCNPat<
(SI_CALL_ISEL $src0, (i64 0))
>;
+// Funnel shift right (fshr) patterns for uniform inputs.
+// These patterns implement this using scalar instructions by constructing a 64-bit
+// value {a, b} and performing a single right shift.
+def : GCNPat<(UniformTernaryFrag<fshr> i32:$src0, i32:$src1, i32:$src2),
+ (i32 (EXTRACT_SUBREG (S_LSHR_B64 (REG_SEQUENCE SReg_64, $src1, sub0, $src0, sub1), (S_AND_B32 $src2, (i32 31))), sub0))
+>;
+
+def : GCNPat<(UniformTernaryFrag<fshr> i32:$src0, i32:$src1, (i32 ShiftAmt32Imm:$src2)),
+ (i32 (EXTRACT_SUBREG (S_LSHR_B64 (REG_SEQUENCE SReg_64, $src1, sub0, $src0, sub1), $src2), sub0))
+>;
+
// Wrapper around s_swappc_b64 with extra $callee parameter to track
// the called function after regalloc.
def SI_CALL : SPseudoInstSI <
diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index 00aae2c9..fcf91e0 100644
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -1337,11 +1337,9 @@ SILoadStoreOptimizer::checkAndPrepareMerge(CombineInfo &CI,
int Data1Idx = AMDGPU::getNamedOperandIdx(Write2Opc.getOpcode(),
AMDGPU::OpName::data1);
- const TargetRegisterClass *DataRC0 =
- TII->getRegClass(Write2Opc, Data0Idx, TRI);
+ const TargetRegisterClass *DataRC0 = TII->getRegClass(Write2Opc, Data0Idx);
- const TargetRegisterClass *DataRC1 =
- TII->getRegClass(Write2Opc, Data1Idx, TRI);
+ const TargetRegisterClass *DataRC1 = TII->getRegClass(Write2Opc, Data1Idx);
if (unsigned SubReg = Data0->getSubReg()) {
DataRC0 = TRI->getMatchingSuperRegClass(MRI->getRegClass(Data0->getReg()),
diff --git a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
index 40eeeb8..cbd08f0 100644
--- a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
@@ -117,27 +117,26 @@ static void insertCSRSaves(MachineBasicBlock &SaveBlock,
MachineFunction &MF = *SaveBlock.getParent();
const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
- const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
const SIRegisterInfo *RI = ST.getRegisterInfo();
MachineBasicBlock::iterator I = SaveBlock.begin();
- if (!TFI->spillCalleeSavedRegisters(SaveBlock, I, CSI, TRI)) {
+ if (!TFI->spillCalleeSavedRegisters(SaveBlock, I, CSI, RI)) {
for (const CalleeSavedInfo &CS : CSI) {
// Insert the spill to the stack frame.
MCRegister Reg = CS.getReg();
MachineInstrSpan MIS(I, &SaveBlock);
- const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(
+ const TargetRegisterClass *RC = RI->getMinimalPhysRegClass(
Reg, Reg == RI->getReturnAddressReg(MF) ? MVT::i64 : MVT::i32);
// If this value was already livein, we probably have a direct use of the
// incoming register value, so don't kill at the spill point. This happens
// since we pass some special inputs (workgroup IDs) in the callee saved
// range.
- const bool IsLiveIn = isLiveIntoMBB(Reg, SaveBlock, TRI);
+ const bool IsLiveIn = isLiveIntoMBB(Reg, SaveBlock, RI);
TII.storeRegToStackSlot(SaveBlock, I, Reg, !IsLiveIn, CS.getFrameIdx(),
- RC, TRI, Register());
+ RC, Register());
if (Indexes) {
assert(std::distance(MIS.begin(), I) == 1);
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index caff354..86ca22c 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -1346,7 +1346,7 @@ void SIPeepholeSDWA::legalizeScalarOperands(MachineInstr &MI,
continue;
unsigned I = Op.getOperandNo();
- const TargetRegisterClass *OpRC = TII->getRegClass(Desc, I, TRI);
+ const TargetRegisterClass *OpRC = TII->getRegClass(Desc, I);
if (!OpRC || !TRI->isVSSuperClass(OpRC))
continue;
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index a6c1af2..7559eff 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -3046,7 +3046,7 @@ bool SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
if (!IsMUBUF && !MFI->isBottomOfStack()) {
// Convert to a swizzled stack address by scaling by the wave size.
// In an entry function/kernel the offset is already swizzled.
- bool IsSALU = isSGPRClass(TII->getOpRegClass(*MI, FIOperandNum));
+ bool IsSALU = isSGPRClass(TII->getRegClass(MI->getDesc(), FIOperandNum));
bool LiveSCC = RS->isRegUsed(AMDGPU::SCC) &&
!MI->definesRegister(AMDGPU::SCC, /*TRI=*/nullptr);
const TargetRegisterClass *RC = IsSALU && !LiveSCC
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index 3e1b058..37bf2d2 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -897,7 +897,7 @@ unsigned ComponentInfo::getIndexInParsedOperands(unsigned CompOprIdx) const {
}
std::optional<unsigned> InstInfo::getInvalidCompOperandIndex(
- std::function<unsigned(unsigned, unsigned)> GetRegIdx,
+ std::function<MCRegister(unsigned, unsigned)> GetRegIdx,
const MCRegisterInfo &MRI, bool SkipSrc, bool AllowSameVGPR,
bool VOPD3) const {
@@ -914,12 +914,13 @@ std::optional<unsigned> InstInfo::getInvalidCompOperandIndex(
BaseX = X;
if (!BaseY)
BaseY = Y;
- if ((BaseX & BanksMask) == (BaseY & BanksMask))
+ if ((BaseX.id() & BanksMask) == (BaseY.id() & BanksMask))
return true;
if (BaseX != X /* This is 64-bit register */ &&
- ((BaseX + 1) & BanksMask) == (BaseY & BanksMask))
+ ((BaseX.id() + 1) & BanksMask) == (BaseY.id() & BanksMask))
return true;
- if (BaseY != Y && (BaseX & BanksMask) == ((BaseY + 1) & BanksMask))
+ if (BaseY != Y &&
+ (BaseX.id() & BanksMask) == ((BaseY.id() + 1) & BanksMask))
return true;
// If both are 64-bit bank conflict will be detected yet while checking
@@ -968,7 +969,7 @@ std::optional<unsigned> InstInfo::getInvalidCompOperandIndex(
// if the operand is not a register or not a VGPR.
InstInfo::RegIndices
InstInfo::getRegIndices(unsigned CompIdx,
- std::function<unsigned(unsigned, unsigned)> GetRegIdx,
+ std::function<MCRegister(unsigned, unsigned)> GetRegIdx,
bool VOPD3) const {
assert(CompIdx < COMPONENTS_NUM);
@@ -983,7 +984,7 @@ InstInfo::getRegIndices(unsigned CompIdx,
Comp.hasRegSrcOperand(CompSrcIdx)
? GetRegIdx(CompIdx,
Comp.getIndexOfSrcInMCOperands(CompSrcIdx, VOPD3))
- : 0;
+ : MCRegister();
}
return RegIndices;
}
@@ -2697,8 +2698,8 @@ MCRegister getMCReg(MCRegister Reg, const MCSubtargetInfo &STI) {
MCRegister mc2PseudoReg(MCRegister Reg) { MAP_REG2REG }
-bool isInlineValue(unsigned Reg) {
- switch (Reg) {
+bool isInlineValue(MCRegister Reg) {
+ switch (Reg.id()) {
case AMDGPU::SRC_SHARED_BASE_LO:
case AMDGPU::SRC_SHARED_BASE:
case AMDGPU::SRC_SHARED_LIMIT_LO:
@@ -3361,7 +3362,7 @@ const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t Format,
: getGfx9BufferFormatInfo(Format);
}
-const MCRegisterClass *getVGPRPhysRegClass(MCPhysReg Reg,
+const MCRegisterClass *getVGPRPhysRegClass(MCRegister Reg,
const MCRegisterInfo &MRI) {
const unsigned VGPRClasses[] = {
AMDGPU::VGPR_16RegClassID, AMDGPU::VGPR_32RegClassID,
@@ -3382,22 +3383,22 @@ const MCRegisterClass *getVGPRPhysRegClass(MCPhysReg Reg,
return nullptr;
}
-unsigned getVGPREncodingMSBs(MCPhysReg Reg, const MCRegisterInfo &MRI) {
+unsigned getVGPREncodingMSBs(MCRegister Reg, const MCRegisterInfo &MRI) {
unsigned Enc = MRI.getEncodingValue(Reg);
unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
return Idx >> 8;
}
-MCPhysReg getVGPRWithMSBs(MCPhysReg Reg, unsigned MSBs,
- const MCRegisterInfo &MRI) {
+MCRegister getVGPRWithMSBs(MCRegister Reg, unsigned MSBs,
+ const MCRegisterInfo &MRI) {
unsigned Enc = MRI.getEncodingValue(Reg);
unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
if (Idx >= 0x100)
- return AMDGPU::NoRegister;
+ return MCRegister();
const MCRegisterClass *RC = getVGPRPhysRegClass(Reg, MRI);
if (!RC)
- return AMDGPU::NoRegister;
+ return MCRegister();
Idx |= MSBs << 8;
if (RC->getID() == AMDGPU::VGPR_16RegClassID) {
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index 5e3195b..9f65f93 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -909,7 +909,7 @@ private:
const ComponentInfo CompInfo[COMPONENTS_NUM];
public:
- using RegIndices = std::array<unsigned, Component::MAX_OPR_NUM>;
+ using RegIndices = std::array<MCRegister, Component::MAX_OPR_NUM>;
InstInfo(const MCInstrDesc &OpX, const MCInstrDesc &OpY)
: CompInfo{OpX, OpY} {}
@@ -932,9 +932,10 @@ public:
// even though it violates requirement to be from different banks.
// If \p VOPD3 is set to true both dst registers allowed to be either odd
// or even and instruction may have real src2 as opposed to tied accumulator.
- bool hasInvalidOperand(std::function<unsigned(unsigned, unsigned)> GetRegIdx,
- const MCRegisterInfo &MRI, bool SkipSrc = false,
- bool AllowSameVGPR = false, bool VOPD3 = false) const {
+ bool
+ hasInvalidOperand(std::function<MCRegister(unsigned, unsigned)> GetRegIdx,
+ const MCRegisterInfo &MRI, bool SkipSrc = false,
+ bool AllowSameVGPR = false, bool VOPD3 = false) const {
return getInvalidCompOperandIndex(GetRegIdx, MRI, SkipSrc, AllowSameVGPR,
VOPD3)
.has_value();
@@ -949,14 +950,14 @@ public:
// If \p VOPD3 is set to true both dst registers allowed to be either odd
// or even and instruction may have real src2 as opposed to tied accumulator.
std::optional<unsigned> getInvalidCompOperandIndex(
- std::function<unsigned(unsigned, unsigned)> GetRegIdx,
+ std::function<MCRegister(unsigned, unsigned)> GetRegIdx,
const MCRegisterInfo &MRI, bool SkipSrc = false,
bool AllowSameVGPR = false, bool VOPD3 = false) const;
private:
RegIndices
getRegIndices(unsigned ComponentIdx,
- std::function<unsigned(unsigned, unsigned)> GetRegIdx,
+ std::function<MCRegister(unsigned, unsigned)> GetRegIdx,
bool VOPD3) const;
};
@@ -1599,7 +1600,7 @@ LLVM_READNONE
MCRegister mc2PseudoReg(MCRegister Reg);
LLVM_READNONE
-bool isInlineValue(unsigned Reg);
+bool isInlineValue(MCRegister Reg);
/// Is this an AMDGPU specific source operand? These include registers,
/// inline constants, literals and mandatory literals (KImm).
@@ -1798,16 +1799,16 @@ bool isIntrinsicAlwaysUniform(unsigned IntrID);
/// \returns a register class for the physical register \p Reg if it is a VGPR
/// or nullptr otherwise.
-const MCRegisterClass *getVGPRPhysRegClass(MCPhysReg Reg,
+const MCRegisterClass *getVGPRPhysRegClass(MCRegister Reg,
const MCRegisterInfo &MRI);
/// \returns the MODE bits which have to be set by the S_SET_VGPR_MSB for the
/// physical register \p Reg.
-unsigned getVGPREncodingMSBs(MCPhysReg Reg, const MCRegisterInfo &MRI);
+unsigned getVGPREncodingMSBs(MCRegister Reg, const MCRegisterInfo &MRI);
/// If \p Reg is a low VGPR return a corresponding high VGPR with \p MSBs set.
-MCPhysReg getVGPRWithMSBs(MCPhysReg Reg, unsigned MSBs,
- const MCRegisterInfo &MRI);
+MCRegister getVGPRWithMSBs(MCRegister Reg, unsigned MSBs,
+ const MCRegisterInfo &MRI);
// Returns a table for the opcode with a given \p Desc to map the VGPR MSB
// set by the S_SET_VGPR_MSB to one of 4 sources. In case of VOPD returns 2
diff --git a/llvm/lib/Target/ARC/ARCInstrInfo.cpp b/llvm/lib/Target/ARC/ARCInstrInfo.cpp
index 2dec6ff..e17ecbf 100644
--- a/llvm/lib/Target/ARC/ARCInstrInfo.cpp
+++ b/llvm/lib/Target/ARC/ARCInstrInfo.cpp
@@ -294,8 +294,7 @@ void ARCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
void ARCInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register SrcReg,
- bool IsKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool IsKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags) const {
DebugLoc DL = MBB.findDebugLoc(I);
MachineFunction &MF = *MBB.getParent();
@@ -307,11 +306,11 @@ void ARCInstrInfo::storeRegToStackSlot(
MFI.getObjectAlign(FrameIndex));
assert(MMO && "Couldn't get MachineMemOperand for store to stack.");
- assert(TRI->getSpillSize(*RC) == 4 &&
+ assert(TRI.getSpillSize(*RC) == 4 &&
"Only support 4-byte stores to stack now.");
assert(ARC::GPR32RegClass.hasSubClassEq(RC) &&
"Only support GPR32 stores to stack now.");
- LLVM_DEBUG(dbgs() << "Created store reg=" << printReg(SrcReg, TRI)
+ LLVM_DEBUG(dbgs() << "Created store reg=" << printReg(SrcReg, &TRI)
<< " to FrameIndex=" << FrameIndex << "\n");
BuildMI(MBB, I, DL, get(ARC::ST_rs9))
.addReg(SrcReg, getKillRegState(IsKill))
@@ -324,7 +323,6 @@ void ARCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register DestReg, int FrameIndex,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
DebugLoc DL = MBB.findDebugLoc(I);
@@ -336,11 +334,11 @@ void ARCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MFI.getObjectAlign(FrameIndex));
assert(MMO && "Couldn't get MachineMemOperand for store to stack.");
- assert(TRI->getSpillSize(*RC) == 4 &&
+ assert(TRI.getSpillSize(*RC) == 4 &&
"Only support 4-byte loads from stack now.");
assert(ARC::GPR32RegClass.hasSubClassEq(RC) &&
"Only support GPR32 stores to stack now.");
- LLVM_DEBUG(dbgs() << "Created load reg=" << printReg(DestReg, TRI)
+ LLVM_DEBUG(dbgs() << "Created load reg=" << printReg(DestReg, &TRI)
<< " from FrameIndex=" << FrameIndex << "\n");
BuildMI(MBB, I, DL, get(ARC::LD_rs9))
.addReg(DestReg, RegState::Define)
diff --git a/llvm/lib/Target/ARC/ARCInstrInfo.h b/llvm/lib/Target/ARC/ARCInstrInfo.h
index 2cf05ba..ebeaf87 100644
--- a/llvm/lib/Target/ARC/ARCInstrInfo.h
+++ b/llvm/lib/Target/ARC/ARCInstrInfo.h
@@ -70,14 +70,12 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool IsKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool IsKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
bool
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index b466ca6f..6077c18 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -929,15 +929,15 @@ ARMBaseInstrInfo::describeLoadedValue(const MachineInstr &MI,
return TargetInstrInfo::describeLoadedValue(MI, Reg);
}
-const MachineInstrBuilder &
-ARMBaseInstrInfo::AddDReg(MachineInstrBuilder &MIB, unsigned Reg,
- unsigned SubIdx, unsigned State,
- const TargetRegisterInfo *TRI) const {
+const MachineInstrBuilder &ARMBaseInstrInfo::AddDReg(MachineInstrBuilder &MIB,
+ unsigned Reg,
+ unsigned SubIdx,
+ unsigned State) const {
if (!SubIdx)
return MIB.addReg(Reg, State);
if (Register::isPhysicalRegister(Reg))
- return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
+ return MIB.addReg(getRegisterInfo().getSubReg(Reg, SubIdx), State);
return MIB.addReg(Reg, State, SubIdx);
}
@@ -945,18 +945,18 @@ void ARMBaseInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = MF.getFrameInfo();
Align Alignment = MFI.getObjectAlign(FI);
+ const ARMBaseRegisterInfo &TRI = getRegisterInfo();
MachineMemOperand *MMO = MF.getMachineMemOperand(
MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
MFI.getObjectSize(FI), Alignment);
- switch (TRI->getSpillSize(*RC)) {
+ switch (TRI.getSpillSize(*RC)) {
case 2:
if (ARM::HPRRegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DebugLoc(), get(ARM::VSTRH))
@@ -1011,8 +1011,8 @@ void ARMBaseInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
} else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
if (Subtarget.hasV5TEOps()) {
MachineInstrBuilder MIB = BuildMI(MBB, I, DebugLoc(), get(ARM::STRD));
- AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
- AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
+ AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill));
+ AddDReg(MIB, SrcReg, ARM::gsub_1, 0);
MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO)
.add(predOps(ARMCC::AL));
} else {
@@ -1022,8 +1022,8 @@ void ARMBaseInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
.addFrameIndex(FI)
.addMemOperand(MMO)
.add(predOps(ARMCC::AL));
- AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
- AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
+ AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill));
+ AddDReg(MIB, SrcReg, ARM::gsub_1, 0);
}
} else
llvm_unreachable("Unknown reg class!");
@@ -1073,9 +1073,9 @@ void ARMBaseInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
.addFrameIndex(FI)
.add(predOps(ARMCC::AL))
.addMemOperand(MMO);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
- AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill));
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0);
+ AddDReg(MIB, SrcReg, ARM::dsub_2, 0);
}
} else
llvm_unreachable("Unknown reg class!");
@@ -1105,10 +1105,10 @@ void ARMBaseInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
.addFrameIndex(FI)
.add(predOps(ARMCC::AL))
.addMemOperand(MMO);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
- AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill));
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0);
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0);
+ AddDReg(MIB, SrcReg, ARM::dsub_3, 0);
}
} else
llvm_unreachable("Unknown reg class!");
@@ -1125,14 +1125,14 @@ void ARMBaseInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
.addFrameIndex(FI)
.add(predOps(ARMCC::AL))
.addMemOperand(MMO);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
- MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
- AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI);
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill));
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0);
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0);
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0);
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0);
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0);
+ MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0);
+ AddDReg(MIB, SrcReg, ARM::dsub_7, 0);
} else
llvm_unreachable("Unknown reg class!");
break;
@@ -1208,10 +1208,12 @@ Register ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
return false;
}
-void ARMBaseInstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void ARMBaseInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DestReg, int FI,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
MachineFunction &MF = *MBB.getParent();
@@ -1221,7 +1223,8 @@ void ARMBaseInstrInfo::loadRegFromStackSlot(
MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
MFI.getObjectSize(FI), Alignment);
- switch (TRI->getSpillSize(*RC)) {
+ const ARMBaseRegisterInfo &TRI = getRegisterInfo();
+ switch (TRI.getSpillSize(*RC)) {
case 2:
if (ARM::HPRRegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(ARM::VLDRH), DestReg)
@@ -1272,8 +1275,8 @@ void ARMBaseInstrInfo::loadRegFromStackSlot(
if (Subtarget.hasV5TEOps()) {
MIB = BuildMI(MBB, I, DL, get(ARM::LDRD));
- AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
- AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
+ AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead);
+ AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead);
MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO)
.add(predOps(ARMCC::AL));
} else {
@@ -1283,8 +1286,8 @@ void ARMBaseInstrInfo::loadRegFromStackSlot(
.addFrameIndex(FI)
.addMemOperand(MMO)
.add(predOps(ARMCC::AL));
- MIB = AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead);
}
if (DestReg.isPhysical())
@@ -1330,9 +1333,9 @@ void ARMBaseInstrInfo::loadRegFromStackSlot(
.addFrameIndex(FI)
.addMemOperand(MMO)
.add(predOps(ARMCC::AL));
- MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead);
if (DestReg.isPhysical())
MIB.addReg(DestReg, RegState::ImplicitDefine);
}
@@ -1359,10 +1362,10 @@ void ARMBaseInstrInfo::loadRegFromStackSlot(
.addFrameIndex(FI)
.add(predOps(ARMCC::AL))
.addMemOperand(MMO);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead);
if (DestReg.isPhysical())
MIB.addReg(DestReg, RegState::ImplicitDefine);
}
@@ -1380,14 +1383,14 @@ void ARMBaseInstrInfo::loadRegFromStackSlot(
.addFrameIndex(FI)
.add(predOps(ARMCC::AL))
.addMemOperand(MMO);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::DefineNoRead, TRI);
- MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::DefineNoRead, TRI);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::DefineNoRead);
+ MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::DefineNoRead);
if (DestReg.isPhysical())
MIB.addReg(DestReg, RegState::ImplicitDefine);
} else
@@ -1653,8 +1656,7 @@ static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
void ARMBaseInstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register DestReg, unsigned SubIdx,
- const MachineInstr &Orig,
- const TargetRegisterInfo &TRI) const {
+ const MachineInstr &Orig) const {
unsigned Opcode = Orig.getOpcode();
switch (Opcode) {
default: {
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
index 27f8e3b..04e2ab0 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
@@ -216,14 +216,13 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
@@ -232,16 +231,14 @@ public:
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
Register DestReg, unsigned SubIdx,
- const MachineInstr &Orig,
- const TargetRegisterInfo &TRI) const override;
+ const MachineInstr &Orig) const override;
MachineInstr &
duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
const MachineInstr &Orig) const override;
const MachineInstrBuilder &AddDReg(MachineInstrBuilder &MIB, unsigned Reg,
- unsigned SubIdx, unsigned State,
- const TargetRegisterInfo *TRI) const;
+ unsigned SubIdx, unsigned State) const;
bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1,
const MachineRegisterInfo *MRI) const override;
diff --git a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index ce1cdb3..80921ce 100644
--- a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -708,7 +708,7 @@ ARMBaseRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
const MCInstrDesc &MCID = TII.get(ADDriOpc);
Register BaseReg = MRI.createVirtualRegister(&ARM::GPRRegClass);
- MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this));
+ MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0));
MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
.addFrameIndex(FrameIdx).addImm(Offset);
@@ -881,8 +881,7 @@ ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
Register PredReg = (PIdx == -1) ? Register() : MI.getOperand(PIdx+1).getReg();
const MCInstrDesc &MCID = MI.getDesc();
- const TargetRegisterClass *RegClass =
- TII.getRegClass(MCID, FIOperandNum, this);
+ const TargetRegisterClass *RegClass = TII.getRegClass(MCID, FIOperandNum);
if (Offset == 0 && (FrameReg.isVirtual() || RegClass->contains(FrameReg)))
// Must be addrmode4/6.
diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
index f43ec73..80494d9 100644
--- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -51,7 +51,6 @@
#include <cassert>
#include <cstdint>
#include <iterator>
-#include <utility>
#include <vector>
using namespace llvm;
diff --git a/llvm/lib/Target/ARM/ARMFrameLowering.cpp b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
index 138981a..21a1135 100644
--- a/llvm/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMFrameLowering.cpp
@@ -2342,7 +2342,6 @@ static unsigned estimateRSStackSizeLimit(MachineFunction &MF,
const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
const ARMBaseInstrInfo &TII =
*static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
- const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
unsigned Limit = (1 << 12) - 1;
for (auto &MBB : MF) {
for (auto &MI : MBB) {
@@ -2364,7 +2363,7 @@ static unsigned estimateRSStackSizeLimit(MachineFunction &MF,
break;
const MCInstrDesc &MCID = MI.getDesc();
- const TargetRegisterClass *RegClass = TII.getRegClass(MCID, i, TRI);
+ const TargetRegisterClass *RegClass = TII.getRegClass(MCID, i);
if (RegClass && !RegClass->contains(ARM::SP))
HasNonSPFrameIndex = true;
diff --git a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index cd4299b..db37b76 100644
--- a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -2424,7 +2424,7 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(
Ops.pop_back();
const MCInstrDesc &MCID = TII->get(NewOpc);
- const TargetRegisterClass *TRC = TII->getRegClass(MCID, 0, TRI);
+ const TargetRegisterClass *TRC = TII->getRegClass(MCID, 0);
MRI->constrainRegClass(FirstReg, TRC);
MRI->constrainRegClass(SecondReg, TRC);
@@ -3014,7 +3014,7 @@ static void AdjustBaseAndOffset(MachineInstr *MI, Register NewBaseReg,
MachineFunction *MF = MI->getMF();
MachineRegisterInfo &MRI = MF->getRegInfo();
const MCInstrDesc &MCID = TII->get(MI->getOpcode());
- const TargetRegisterClass *TRC = TII->getRegClass(MCID, BaseOp, TRI);
+ const TargetRegisterClass *TRC = TII->getRegClass(MCID, BaseOp);
MRI.constrainRegClass(NewBaseReg, TRC);
int OldOffset = MI->getOperand(BaseOp + 1).getImm();
@@ -3071,10 +3071,10 @@ static MachineInstr *createPostIncLoadStore(MachineInstr *MI, int Offset,
const MCInstrDesc &MCID = TII->get(NewOpcode);
// Constrain the def register class
- const TargetRegisterClass *TRC = TII->getRegClass(MCID, 0, TRI);
+ const TargetRegisterClass *TRC = TII->getRegClass(MCID, 0);
MRI.constrainRegClass(NewReg, TRC);
// And do the same for the base operand
- TRC = TII->getRegClass(MCID, 2, TRI);
+ TRC = TII->getRegClass(MCID, 2);
MRI.constrainRegClass(MI->getOperand(1).getReg(), TRC);
unsigned AddrMode = (MCID.TSFlags & ARMII::AddrModeMask);
diff --git a/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h b/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
index 72eb3d0..b689760 100644
--- a/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
+++ b/llvm/lib/Target/ARM/ARMMachineFunctionInfo.h
@@ -19,7 +19,6 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/Support/ErrorHandling.h"
-#include <utility>
namespace llvm {
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
index 01fe13b..f8196e4 100644
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
@@ -1238,7 +1238,7 @@ uint64_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
// Verify standard frame (lr/r7) was used.
if (CFARegister != ARM::R7) {
DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
- << CFARegister
+ << CFARegister.id()
<< " instead of r7\n");
return CU::UNWIND_ARM_MODE_DWARF;
}
diff --git a/llvm/lib/Target/ARM/MLxExpansionPass.cpp b/llvm/lib/Target/ARM/MLxExpansionPass.cpp
index 8e1bf1d..eb237b4 100644
--- a/llvm/lib/Target/ARM/MLxExpansionPass.cpp
+++ b/llvm/lib/Target/ARM/MLxExpansionPass.cpp
@@ -283,7 +283,7 @@ MLxExpansion::ExpandFPMLxInstruction(MachineBasicBlock &MBB, MachineInstr *MI,
const MCInstrDesc &MCID1 = TII->get(MulOpc);
const MCInstrDesc &MCID2 = TII->get(AddSubOpc);
- Register TmpReg = MRI->createVirtualRegister(TII->getRegClass(MCID1, 0, TRI));
+ Register TmpReg = MRI->createVirtualRegister(TII->getRegClass(MCID1, 0));
MachineInstrBuilder MIB = BuildMI(MBB, MI, MI->getDebugLoc(), MCID1, TmpReg)
.addReg(Src1Reg, getKillRegState(Src1Kill))
diff --git a/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp b/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
index f95ba6a4..01f588f 100644
--- a/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
+++ b/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
@@ -116,7 +116,6 @@ void Thumb1InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
assert((RC == &ARM::tGPRRegClass ||
@@ -142,10 +141,12 @@ void Thumb1InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
}
}
-void Thumb1InstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void Thumb1InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DestReg, int FI,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
assert((RC->hasSuperClassEq(&ARM::tGPRRegClass) ||
(DestReg.isPhysical() && isARMLowRegister(DestReg))) &&
"Unknown regclass!");
diff --git a/llvm/lib/Target/ARM/Thumb1InstrInfo.h b/llvm/lib/Target/ARM/Thumb1InstrInfo.h
index 16350a6..289a30a 100644
--- a/llvm/lib/Target/ARM/Thumb1InstrInfo.h
+++ b/llvm/lib/Target/ARM/Thumb1InstrInfo.h
@@ -43,14 +43,13 @@ public:
bool RenamableSrc = false) const override;
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
bool canCopyGluedNodeDuringSchedule(SDNode *N) const override;
diff --git a/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp b/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
index b66e407..efb92c9 100644
--- a/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
+++ b/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
@@ -165,7 +165,6 @@ void Thumb2InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
DebugLoc DL;
@@ -197,20 +196,22 @@ void Thumb2InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
}
MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2STRDi8));
- AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
- AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
+ AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill));
+ AddDReg(MIB, SrcReg, ARM::gsub_1, 0);
MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));
return;
}
- ARMBaseInstrInfo::storeRegToStackSlot(MBB, I, SrcReg, isKill, FI, RC, TRI,
+ ARMBaseInstrInfo::storeRegToStackSlot(MBB, I, SrcReg, isKill, FI, RC,
Register());
}
-void Thumb2InstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void Thumb2InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DestReg, int FI,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = MF.getFrameInfo();
MachineMemOperand *MMO = MF.getMachineMemOperand(
@@ -238,8 +239,8 @@ void Thumb2InstrInfo::loadRegFromStackSlot(
}
MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2LDRDi8));
- AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
- AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
+ AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead);
+ AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead);
MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));
if (DestReg.isPhysical())
@@ -247,8 +248,7 @@ void Thumb2InstrInfo::loadRegFromStackSlot(
return;
}
- ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI,
- Register());
+ ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, Register());
}
void Thumb2InstrInfo::expandLoadStackGuard(
@@ -564,7 +564,7 @@ bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
bool isSub = false;
MachineFunction &MF = *MI.getParent()->getParent();
- const TargetRegisterClass *RegClass = TII.getRegClass(Desc, FrameRegIdx, TRI);
+ const TargetRegisterClass *RegClass = TII.getRegClass(Desc, FrameRegIdx);
// Memory operands in inline assembly always use AddrModeT2_i12.
if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
diff --git a/llvm/lib/Target/ARM/Thumb2InstrInfo.h b/llvm/lib/Target/ARM/Thumb2InstrInfo.h
index 59ef39d..1e11cb3 100644
--- a/llvm/lib/Target/ARM/Thumb2InstrInfo.h
+++ b/llvm/lib/Target/ARM/Thumb2InstrInfo.h
@@ -44,14 +44,13 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
diff --git a/llvm/lib/Target/AVR/AVRInstrInfo.cpp b/llvm/lib/Target/AVR/AVRInstrInfo.cpp
index 5e247cb..6c37ba1 100644
--- a/llvm/lib/Target/AVR/AVRInstrInfo.cpp
+++ b/llvm/lib/Target/AVR/AVRInstrInfo.cpp
@@ -126,8 +126,7 @@ Register AVRInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
void AVRInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags) const {
MachineFunction &MF = *MBB.getParent();
AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
@@ -142,9 +141,9 @@ void AVRInstrInfo::storeRegToStackSlot(
MFI.getObjectAlign(FrameIndex));
unsigned Opcode = 0;
- if (TRI->isTypeLegalForClass(*RC, MVT::i8)) {
+ if (RI.isTypeLegalForClass(*RC, MVT::i8)) {
Opcode = AVR::STDPtrQRr;
- } else if (TRI->isTypeLegalForClass(*RC, MVT::i16)) {
+ } else if (RI.isTypeLegalForClass(*RC, MVT::i16)) {
Opcode = AVR::STDWPtrQRr;
} else {
llvm_unreachable("Cannot store this register into a stack slot!");
@@ -161,7 +160,6 @@ void AVRInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
Register DestReg, int FrameIndex,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
MachineFunction &MF = *MBB.getParent();
@@ -173,9 +171,9 @@ void AVRInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MFI.getObjectAlign(FrameIndex));
unsigned Opcode = 0;
- if (TRI->isTypeLegalForClass(*RC, MVT::i8)) {
+ if (TRI.isTypeLegalForClass(*RC, MVT::i8)) {
Opcode = AVR::LDDRdPtrQ;
- } else if (TRI->isTypeLegalForClass(*RC, MVT::i16)) {
+ } else if (TRI.isTypeLegalForClass(*RC, MVT::i16)) {
// Opcode = AVR::LDDWRdPtrQ;
//: FIXME: remove this once PR13375 gets fixed
Opcode = AVR::LDDWRdYQ;
diff --git a/llvm/lib/Target/AVR/AVRInstrInfo.h b/llvm/lib/Target/AVR/AVRInstrInfo.h
index 759aea2..4db535a 100644
--- a/llvm/lib/Target/AVR/AVRInstrInfo.h
+++ b/llvm/lib/Target/AVR/AVRInstrInfo.h
@@ -79,13 +79,11 @@ public:
bool RenamableSrc = false) const override;
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
Register isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
diff --git a/llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp b/llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp
index fc794c4..48452f6 100644
--- a/llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp
+++ b/llvm/lib/Target/AVR/AsmParser/AVRAsmParser.cpp
@@ -252,7 +252,7 @@ public:
O << "Token: \"" << getToken() << "\"";
break;
case k_Register:
- O << "Register: " << getReg();
+ O << "Register: " << getReg().id();
break;
case k_Immediate:
O << "Immediate: \"";
@@ -262,7 +262,7 @@ public:
case k_Memri: {
// only manually print the size for non-negative values,
// as the sign is inserted automatically.
- O << "Memri: \"" << getReg() << '+';
+ O << "Memri: \"" << getReg().id() << '+';
MAI.printExpr(O, *getImm());
O << "\"";
break;
diff --git a/llvm/lib/Target/AVR/Disassembler/AVRDisassembler.cpp b/llvm/lib/Target/AVR/Disassembler/AVRDisassembler.cpp
index 5548ad1..84a64ba 100644
--- a/llvm/lib/Target/AVR/Disassembler/AVRDisassembler.cpp
+++ b/llvm/lib/Target/AVR/Disassembler/AVRDisassembler.cpp
@@ -82,7 +82,7 @@ static DecodeStatus DecodeGPR8RegisterClass(MCInst &Inst, unsigned RegNo,
if (RegNo > 31)
return MCDisassembler::Fail;
- unsigned Register = GPRDecoderTable[RegNo];
+ MCRegister Register = GPRDecoderTable[RegNo];
Inst.addOperand(MCOperand::createReg(Register));
return MCDisassembler::Success;
}
@@ -174,7 +174,7 @@ static DecodeStatus decodeLoadStore(MCInst &Inst, unsigned Insn,
uint64_t Address,
const MCDisassembler *Decoder) {
// Get the register will be loaded or stored.
- unsigned RegVal = GPRDecoderTable[(Insn >> 4) & 0x1f];
+ MCRegister RegVal = GPRDecoderTable[(Insn >> 4) & 0x1f];
// Decode LDD/STD with offset less than 8.
if ((Insn & 0xf000) == 0x8000) {
diff --git a/llvm/lib/Target/AVR/MCTargetDesc/AVRMCCodeEmitter.cpp b/llvm/lib/Target/AVR/MCTargetDesc/AVRMCCodeEmitter.cpp
index 4bb16e2..fbb130c 100644
--- a/llvm/lib/Target/AVR/MCTargetDesc/AVRMCCodeEmitter.cpp
+++ b/llvm/lib/Target/AVR/MCTargetDesc/AVRMCCodeEmitter.cpp
@@ -96,7 +96,7 @@ AVRMCCodeEmitter::loadStorePostEncoder(const MCInst &MI, unsigned EncodedValue,
EncodedValue |= (1 << 12);
// Encode the pointer register.
- switch (MI.getOperand(Idx).getReg()) {
+ switch (MI.getOperand(Idx).getReg().id()) {
case AVR::R27R26:
EncodedValue |= 0xc;
break;
diff --git a/llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp b/llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp
index d96f403d..9f86322 100644
--- a/llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp
+++ b/llvm/lib/Target/BPF/AsmParser/BPFAsmParser.cpp
@@ -172,7 +172,7 @@ public:
break;
case Register:
OS << "<register x";
- OS << getReg() << ">";
+ OS << getReg().id() << ">";
break;
case Token:
OS << "'" << getToken() << "'";
diff --git a/llvm/lib/Target/BPF/BPFInstrInfo.cpp b/llvm/lib/Target/BPF/BPFInstrInfo.cpp
index 0e56e65..095e249 100644
--- a/llvm/lib/Target/BPF/BPFInstrInfo.cpp
+++ b/llvm/lib/Target/BPF/BPFInstrInfo.cpp
@@ -127,7 +127,6 @@ void BPFInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool IsKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
DebugLoc DL;
@@ -148,10 +147,12 @@ void BPFInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
llvm_unreachable("Can't store this register to stack slot");
}
-void BPFInstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void BPFInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DestReg, int FI,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
DebugLoc DL;
if (I != MBB.end())
DL = I->getDebugLoc();
diff --git a/llvm/lib/Target/BPF/BPFInstrInfo.h b/llvm/lib/Target/BPF/BPFInstrInfo.h
index 911e880..d3ef9bc 100644
--- a/llvm/lib/Target/BPF/BPFInstrInfo.h
+++ b/llvm/lib/Target/BPF/BPFInstrInfo.h
@@ -39,14 +39,13 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
diff --git a/llvm/lib/Target/CSKY/CSKYFrameLowering.cpp b/llvm/lib/Target/CSKY/CSKYFrameLowering.cpp
index e81bb47..9879827 100644
--- a/llvm/lib/Target/CSKY/CSKYFrameLowering.cpp
+++ b/llvm/lib/Target/CSKY/CSKYFrameLowering.cpp
@@ -476,7 +476,7 @@ bool CSKYFrameLowering::spillCalleeSavedRegisters(
// Insert the spill to the stack frame.
MCRegister Reg = CS.getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.storeRegToStackSlot(MBB, MI, Reg, true, CS.getFrameIdx(), RC, TRI,
+ TII.storeRegToStackSlot(MBB, MI, Reg, true, CS.getFrameIdx(), RC,
Register());
}
@@ -498,8 +498,7 @@ bool CSKYFrameLowering::restoreCalleeSavedRegisters(
for (auto &CS : reverse(CSI)) {
MCRegister Reg = CS.getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.loadRegFromStackSlot(MBB, MI, Reg, CS.getFrameIdx(), RC, TRI,
- Register());
+ TII.loadRegFromStackSlot(MBB, MI, Reg, CS.getFrameIdx(), RC, Register());
assert(MI != MBB.begin() && "loadRegFromStackSlot didn't insert any code!");
}
diff --git a/llvm/lib/Target/CSKY/CSKYInstrInfo.cpp b/llvm/lib/Target/CSKY/CSKYInstrInfo.cpp
index 34a7de8..3ab0990 100644
--- a/llvm/lib/Target/CSKY/CSKYInstrInfo.cpp
+++ b/llvm/lib/Target/CSKY/CSKYInstrInfo.cpp
@@ -24,8 +24,9 @@ using namespace llvm;
#define GET_INSTRINFO_CTOR_DTOR
#include "CSKYGenInstrInfo.inc"
-CSKYInstrInfo::CSKYInstrInfo(const CSKYSubtarget &STI)
- : CSKYGenInstrInfo(STI, RI, CSKY::ADJCALLSTACKDOWN, CSKY::ADJCALLSTACKUP),
+CSKYInstrInfo::CSKYInstrInfo(const CSKYSubtarget &STI,
+ const CSKYRegisterInfo &TRI)
+ : CSKYGenInstrInfo(STI, TRI, CSKY::ADJCALLSTACKDOWN, CSKY::ADJCALLSTACKUP),
STI(STI) {
v2sf = STI.hasFPUv2SingleFloat();
v2df = STI.hasFPUv2DoubleFloat();
@@ -393,7 +394,6 @@ void CSKYInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool IsKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
DebugLoc DL;
@@ -434,10 +434,12 @@ void CSKYInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
.addMemOperand(MMO);
}
-void CSKYInstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void CSKYInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DestReg, int FI,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
DebugLoc DL;
if (I != MBB.end())
DL = I->getDebugLoc();
diff --git a/llvm/lib/Target/CSKY/CSKYInstrInfo.h b/llvm/lib/Target/CSKY/CSKYInstrInfo.h
index 6451c0a..d1cd039 100644
--- a/llvm/lib/Target/CSKY/CSKYInstrInfo.h
+++ b/llvm/lib/Target/CSKY/CSKYInstrInfo.h
@@ -21,6 +21,7 @@
namespace llvm {
+class CSKYRegisterInfo;
class CSKYSubtarget;
class CSKYInstrInfo : public CSKYGenInstrInfo {
@@ -33,7 +34,7 @@ protected:
const CSKYSubtarget &STI;
public:
- explicit CSKYInstrInfo(const CSKYSubtarget &STI);
+ CSKYInstrInfo(const CSKYSubtarget &STI, const CSKYRegisterInfo &RI);
Register isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const override;
@@ -42,14 +43,12 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool IsKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool IsKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
diff --git a/llvm/lib/Target/CSKY/CSKYSubtarget.cpp b/llvm/lib/Target/CSKY/CSKYSubtarget.cpp
index a554d1c..94e412e 100644
--- a/llvm/lib/Target/CSKY/CSKYSubtarget.cpp
+++ b/llvm/lib/Target/CSKY/CSKYSubtarget.cpp
@@ -92,7 +92,7 @@ CSKYSubtarget::CSKYSubtarget(const Triple &TT, StringRef CPU, StringRef TuneCPU,
StringRef FS, const TargetMachine &TM)
: CSKYGenSubtargetInfo(TT, CPU, TuneCPU, FS),
FrameLowering(initializeSubtargetDependencies(TT, CPU, TuneCPU, FS)),
- InstrInfo(*this), RegInfo(), TLInfo(TM, *this) {
+ InstrInfo(*this, RegInfo), TLInfo(TM, *this) {
TSInfo = std::make_unique<CSKYSelectionDAGInfo>();
}
diff --git a/llvm/lib/Target/CSKY/CSKYSubtarget.h b/llvm/lib/Target/CSKY/CSKYSubtarget.h
index a3f2ddc..f5ad26a 100644
--- a/llvm/lib/Target/CSKY/CSKYSubtarget.h
+++ b/llvm/lib/Target/CSKY/CSKYSubtarget.h
@@ -30,8 +30,8 @@ class CSKYSubtarget : public CSKYGenSubtargetInfo {
virtual void anchor();
CSKYFrameLowering FrameLowering;
- CSKYInstrInfo InstrInfo;
CSKYRegisterInfo RegInfo;
+ CSKYInstrInfo InstrInfo;
CSKYTargetLowering TLInfo;
std::unique_ptr<const SelectionDAGTargetInfo> TSInfo;
diff --git a/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp b/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp
index 26a8728..48a9085 100644
--- a/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp
+++ b/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.cpp
@@ -1169,8 +1169,8 @@ void DXILBitcodeWriter::writeModuleInfo() {
// We need to hardcode a triple and datalayout that's compatible with the
// historical DXIL triple and datalayout from DXC.
StringRef Triple = "dxil-ms-dx";
- StringRef DL = "e-m:e-p:32:32-i1:8-i8:8-i16:32-i32:32-i64:64-"
- "f16:32-f32:32-f64:64-n8:16:32:64";
+ StringRef DL = "e-m:e-p:32:32-i1:32-i8:8-i16:16-i32:32-i64:64-"
+ "f16:16-f32:32-f64:64-n8:16:32:64";
writeStringRecord(Stream, bitc::MODULE_CODE_TRIPLE, Triple, 0 /*TODO*/);
writeStringRecord(Stream, bitc::MODULE_CODE_DATALAYOUT, DL, 0 /*TODO*/);
diff --git a/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp b/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
index b94b148..c18db98 100644
--- a/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
+++ b/llvm/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp
@@ -463,7 +463,7 @@ void HexagonOperand::print(raw_ostream &OS, const MCAsmInfo &MAI) const {
break;
case Register:
OS << "<register R";
- OS << getReg() << ">";
+ OS << getReg().id() << ">";
break;
case Token:
OS << "'" << getToken() << "'";
diff --git a/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp b/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
index 68f5312..557a0a3 100644
--- a/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
@@ -1796,7 +1796,7 @@ namespace {
const MachineDominatorTree &MDT;
const HexagonInstrInfo &HII;
- const HexagonRegisterInfo &HRI;
+ [[maybe_unused]] const HexagonRegisterInfo &HRI;
MachineRegisterInfo &MRI;
BitTracker &BT;
};
@@ -1886,7 +1886,7 @@ bool BitSimplification::matchHalf(unsigned SelfR,
bool BitSimplification::validateReg(BitTracker::RegisterRef R, unsigned Opc,
unsigned OpNum) {
- auto *OpRC = HII.getRegClass(HII.get(Opc), OpNum, &HRI);
+ auto *OpRC = HII.getRegClass(HII.get(Opc), OpNum);
auto *RRC = HBS::getFinalVRegClass(R, MRI);
return OpRC->hasSubClassEq(RRC);
}
diff --git a/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp b/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp
index eca5ac1..bae3484 100644
--- a/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp
@@ -24,7 +24,6 @@
#include <cstdint>
#include <iterator>
#include <map>
-#include <utility>
using namespace llvm;
diff --git a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
index dd343d9..df61226 100644
--- a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -1405,7 +1405,7 @@ bool HexagonFrameLowering::insertCSRSpillsInBlock(MachineBasicBlock &MBB,
bool IsKill = !HRI.isEHReturnCalleeSaveReg(Reg);
int FI = I.getFrameIdx();
const TargetRegisterClass *RC = HRI.getMinimalPhysRegClass(Reg);
- HII.storeRegToStackSlot(MBB, MI, Reg, IsKill, FI, RC, &HRI, Register());
+ HII.storeRegToStackSlot(MBB, MI, Reg, IsKill, FI, RC, Register());
if (IsKill)
MBB.addLiveIn(Reg);
}
@@ -1470,7 +1470,7 @@ bool HexagonFrameLowering::insertCSRRestoresInBlock(MachineBasicBlock &MBB,
MCRegister Reg = I.getReg();
const TargetRegisterClass *RC = HRI.getMinimalPhysRegClass(Reg);
int FI = I.getFrameIdx();
- HII.loadRegFromStackSlot(MBB, MI, Reg, FI, RC, &HRI, Register());
+ HII.loadRegFromStackSlot(MBB, MI, Reg, FI, RC, Register());
}
return true;
@@ -1814,8 +1814,7 @@ bool HexagonFrameLowering::expandStoreVecPred(MachineBasicBlock &B,
.addReg(SrcR, getKillRegState(IsKill))
.addReg(TmpR0, RegState::Kill);
- auto *HRI = B.getParent()->getSubtarget<HexagonSubtarget>().getRegisterInfo();
- HII.storeRegToStackSlot(B, It, TmpR1, true, FI, RC, HRI, Register());
+ HII.storeRegToStackSlot(B, It, TmpR1, true, FI, RC, Register());
expandStoreVec(B, std::prev(It), MRI, HII, NewRegs);
NewRegs.push_back(TmpR0);
@@ -1844,9 +1843,7 @@ bool HexagonFrameLowering::expandLoadVecPred(MachineBasicBlock &B,
BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
.addImm(0x01010101);
- MachineFunction &MF = *B.getParent();
- auto *HRI = MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
- HII.loadRegFromStackSlot(B, It, TmpR1, FI, RC, HRI, Register());
+ HII.loadRegFromStackSlot(B, It, TmpR1, FI, RC, Register());
expandLoadVec(B, std::prev(It), MRI, HII, NewRegs);
BuildMI(B, It, DL, HII.get(Hexagon::V6_vandvrt), DstR)
@@ -2225,7 +2222,7 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
if (!Bad) {
// If the addressing mode is ok, check the register class.
unsigned OpNum = Load ? 0 : 2;
- auto *RC = HII.getRegClass(In.getDesc(), OpNum, &HRI);
+ auto *RC = HII.getRegClass(In.getDesc(), OpNum);
RC = getCommonRC(SI.RC, RC);
if (RC == nullptr)
Bad = true;
@@ -2395,7 +2392,7 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
HexagonBlockRanges::RegisterRef SrcRR = { SrcOp.getReg(),
SrcOp.getSubReg() };
- auto *RC = HII.getRegClass(SI.getDesc(), 2, &HRI);
+ auto *RC = HII.getRegClass(SI.getDesc(), 2);
// The this-> is needed to unconfuse MSVC.
Register FoundR = this->findPhysReg(MF, Range, IM, DM, RC);
LLVM_DEBUG(dbgs() << "Replacement reg:" << printReg(FoundR, &HRI)
diff --git a/llvm/lib/Target/Hexagon/HexagonGenMux.cpp b/llvm/lib/Target/Hexagon/HexagonGenMux.cpp
index 74e5abe..c6fffde 100644
--- a/llvm/lib/Target/Hexagon/HexagonGenMux.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonGenMux.cpp
@@ -43,7 +43,6 @@
#include <cassert>
#include <iterator>
#include <limits>
-#include <utility>
#define DEBUG_TYPE "hexmux"
diff --git a/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp b/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp
index 9c81e96..5344ed8 100644
--- a/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonGenPredicate.cpp
@@ -30,7 +30,6 @@
#include <cassert>
#include <iterator>
#include <queue>
-#include <utility>
#define DEBUG_TYPE "gen-pred"
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index dd9f2fa..7682af4 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -964,7 +964,6 @@ void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
DebugLoc DL = MBB.findDebugLoc(I);
@@ -1009,10 +1008,12 @@ void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
}
}
-void HexagonInstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void HexagonInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DestReg, int FI,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
DebugLoc DL = MBB.findDebugLoc(I);
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = MF.getFrameInfo();
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
index 7a0c77c..796b978 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -188,8 +188,7 @@ public:
/// is true, the register operand is the last use and must be marked kill.
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
/// Load the specified register of the given register class from the specified
@@ -198,7 +197,7 @@ public:
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
/// This function is called for all pseudo instructions
diff --git a/llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp b/llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp
index 7cbd81f..54969b2 100644
--- a/llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonLoadStoreWidening.cpp
@@ -646,7 +646,7 @@ bool HexagonLoadStoreWidening::createWideStores(InstrGroup &OG, InstrGroup &NG,
MachineInstr *CombI;
if (Acc != 0) {
const MCInstrDesc &TfrD = TII->get(Hexagon::A2_tfrsi);
- const TargetRegisterClass *RC = TII->getRegClass(TfrD, 0, TRI);
+ const TargetRegisterClass *RC = TII->getRegClass(TfrD, 0);
Register VReg = MF->getRegInfo().createVirtualRegister(RC);
MachineInstr *TfrI = BuildMI(*MF, DL, TfrD, VReg).addImm(LowerAcc);
NG.push_back(TfrI);
@@ -677,7 +677,7 @@ bool HexagonLoadStoreWidening::createWideStores(InstrGroup &OG, InstrGroup &NG,
} else {
// Create vreg = A2_tfrsi #Acc; mem[hw] = vreg
const MCInstrDesc &TfrD = TII->get(Hexagon::A2_tfrsi);
- const TargetRegisterClass *RC = TII->getRegClass(TfrD, 0, TRI);
+ const TargetRegisterClass *RC = TII->getRegClass(TfrD, 0);
Register VReg = MF->getRegInfo().createVirtualRegister(RC);
MachineInstr *TfrI = BuildMI(*MF, DL, TfrD, VReg).addImm(int(Acc));
NG.push_back(TfrI);
diff --git a/llvm/lib/Target/Hexagon/HexagonRDFOpt.cpp b/llvm/lib/Target/Hexagon/HexagonRDFOpt.cpp
index 54f5608..f375b25 100644
--- a/llvm/lib/Target/Hexagon/HexagonRDFOpt.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonRDFOpt.cpp
@@ -34,7 +34,6 @@
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <limits>
-#include <utility>
using namespace llvm;
using namespace rdf;
diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.h b/llvm/lib/Target/Hexagon/HexagonSubtarget.h
index 30794f6..7dfede2 100644
--- a/llvm/lib/Target/Hexagon/HexagonSubtarget.h
+++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.h
@@ -294,6 +294,8 @@ public:
bool useBSBScheduling() const { return UseBSBScheduling; }
bool enableMachineScheduler() const override;
+ bool enableTerminalRule() const override { return true; }
+
// Always use the TargetLowering default scheduler.
// FIXME: This will use the vliw scheduler which is probably just hurting
// compiler time and will be removed eventually anyway.
diff --git a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index cb88d1a..d39b79a 100644
--- a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -653,7 +653,7 @@ bool HexagonPacketizerList::canPromoteToNewValueStore(const MachineInstr &MI,
const MCInstrDesc& MCID = PacketMI.getDesc();
// First operand is always the result.
- const TargetRegisterClass *PacketRC = HII->getRegClass(MCID, 0, HRI);
+ const TargetRegisterClass *PacketRC = HII->getRegClass(MCID, 0);
// Double regs can not feed into new value store: PRM section: 5.4.2.2.
if (PacketRC == &Hexagon::DoubleRegsRegClass)
return false;
@@ -866,7 +866,7 @@ bool HexagonPacketizerList::canPromoteToDotNew(const MachineInstr &MI,
return false;
const MCInstrDesc& MCID = PI.getDesc();
- const TargetRegisterClass *VecRC = HII->getRegClass(MCID, 0, HRI);
+ const TargetRegisterClass *VecRC = HII->getRegClass(MCID, 0);
if (DisableVecDblNVStores && VecRC == &Hexagon::HvxWRRegClass)
return false;
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp
index 9b6bc5a..0b2279b 100644
--- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp
+++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp
@@ -385,7 +385,7 @@ bool HexagonMCChecker::checkSlots() {
bool HexagonMCChecker::checkPredicates() {
// Check for proper use of new predicate registers.
for (const auto &I : NewPreds) {
- unsigned P = I;
+ MCRegister P = I;
if (!Defs.count(P) || LatePreds.count(P) || Defs.count(Hexagon::P3_0)) {
// Error out if the new predicate register is not defined,
@@ -398,7 +398,7 @@ bool HexagonMCChecker::checkPredicates() {
// Check for proper use of auto-anded of predicate registers.
for (const auto &I : LatePreds) {
- unsigned P = I;
+ MCRegister P = I;
if (LatePreds.count(P) > 1 || Defs.count(P)) {
// Error out if predicate register defined "late" multiple times or
@@ -607,7 +607,7 @@ void HexagonMCChecker::checkRegisterCurDefs() {
bool HexagonMCChecker::checkRegisters() {
// Check for proper register definitions.
for (const auto &I : Defs) {
- unsigned R = I.first;
+ MCRegister R = I.first;
if (isLoopRegister(R) && Defs.count(R) > 1 &&
(HexagonMCInstrInfo::isInnerLoop(MCB) ||
@@ -620,8 +620,8 @@ bool HexagonMCChecker::checkRegisters() {
if (SoftDefs.count(R)) {
// Error out for explicit changes to registers also weakly defined
// (e.g., "{ usr = r0; r0 = sfadd(...) }").
- unsigned UsrR = Hexagon::USR; // Silence warning about mixed types in ?:.
- unsigned BadR = RI.isSubRegister(Hexagon::USR, R) ? UsrR : R;
+ MCRegister UsrR = Hexagon::USR;
+ MCRegister BadR = RI.isSubRegister(Hexagon::USR, R) ? UsrR : R;
reportErrorRegisters(BadR);
return false;
}
@@ -633,8 +633,8 @@ bool HexagonMCChecker::checkRegisters() {
if (PM.count(Unconditional)) {
// Error out on an unconditional change when there are any other
// changes, conditional or not.
- unsigned UsrR = Hexagon::USR;
- unsigned BadR = RI.isSubRegister(Hexagon::USR, R) ? UsrR : R;
+ MCRegister UsrR = Hexagon::USR;
+ MCRegister BadR = RI.isSubRegister(Hexagon::USR, R) ? UsrR : R;
reportErrorRegisters(BadR);
return false;
}
@@ -664,7 +664,7 @@ bool HexagonMCChecker::checkRegisters() {
// Check for use of temporary definitions.
for (const auto &I : TmpDefs) {
- unsigned R = I;
+ MCRegister R = I;
if (!Uses.count(R)) {
// special case for vhist
@@ -765,12 +765,12 @@ void HexagonMCChecker::compoundRegisterMap(unsigned &Register) {
}
}
-void HexagonMCChecker::reportErrorRegisters(unsigned Register) {
+void HexagonMCChecker::reportErrorRegisters(MCRegister Register) {
reportError("register `" + Twine(RI.getName(Register)) +
"' modified more than once");
}
-void HexagonMCChecker::reportErrorNewValue(unsigned Register) {
+void HexagonMCChecker::reportErrorNewValue(MCRegister Register) {
reportError("register `" + Twine(RI.getName(Register)) +
"' used with `.new' "
"but not validly modified in the same packet");
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.h b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.h
index e9b87c5..8beee8d 100644
--- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.h
+++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.h
@@ -39,41 +39,41 @@ class HexagonMCChecker {
bool ReportErrors;
/// Set of definitions: register #, if predicated, if predicated true.
- using PredSense = std::pair<unsigned, bool>;
+ using PredSense = std::pair<MCRegister, bool>;
static const PredSense Unconditional;
using PredSet = std::multiset<PredSense>;
using PredSetIterator = std::multiset<PredSense>::iterator;
- using DefsIterator = DenseMap<unsigned, PredSet>::iterator;
- DenseMap<unsigned, PredSet> Defs;
+ using DefsIterator = DenseMap<MCRegister, PredSet>::iterator;
+ DenseMap<MCRegister, PredSet> Defs;
/// Set of weak definitions whose clashes should be enforced selectively.
- using SoftDefsIterator = std::set<unsigned>::iterator;
- std::set<unsigned> SoftDefs;
+ using SoftDefsIterator = std::set<MCRegister>::iterator;
+ std::set<MCRegister> SoftDefs;
/// Set of temporary definitions not committed to the register file.
- using TmpDefsIterator = std::set<unsigned>::iterator;
- std::set<unsigned> TmpDefs;
+ using TmpDefsIterator = std::set<MCRegister>::iterator;
+ std::set<MCRegister> TmpDefs;
/// Set of new predicates used.
- using NewPredsIterator = std::set<unsigned>::iterator;
- std::set<unsigned> NewPreds;
+ using NewPredsIterator = std::set<MCRegister>::iterator;
+ std::set<MCRegister> NewPreds;
/// Set of predicates defined late.
- using LatePredsIterator = std::multiset<unsigned>::iterator;
- std::multiset<unsigned> LatePreds;
+ using LatePredsIterator = std::multiset<MCRegister>::iterator;
+ std::multiset<MCRegister> LatePreds;
/// Set of uses.
- using UsesIterator = std::set<unsigned>::iterator;
- std::set<unsigned> Uses;
+ using UsesIterator = std::set<MCRegister>::iterator;
+ std::set<MCRegister> Uses;
/// Pre-defined set of read-only registers.
- using ReadOnlyIterator = std::set<unsigned>::iterator;
- std::set<unsigned> ReadOnly;
+ using ReadOnlyIterator = std::set<MCRegister>::iterator;
+ std::set<MCRegister> ReadOnly;
// Contains the vector-pair-registers with the even number
// first ("v0:1", e.g.) used/def'd in this packet.
- std::set<unsigned> ReversePairs;
+ std::set<MCRegister> ReversePairs;
void init();
void init(MCInst const &);
@@ -107,7 +107,7 @@ class HexagonMCChecker {
static void compoundRegisterMap(unsigned &);
- bool isLoopRegister(unsigned R) const {
+ bool isLoopRegister(MCRegister R) const {
return (Hexagon::SA0 == R || Hexagon::LC0 == R || Hexagon::SA1 == R ||
Hexagon::LC1 == R);
}
@@ -120,8 +120,8 @@ public:
MCSubtargetInfo const &STI, bool CopyReportErrors);
bool check(bool FullCheck = true);
- void reportErrorRegisters(unsigned Register);
- void reportErrorNewValue(unsigned Register);
+ void reportErrorRegisters(MCRegister Register);
+ void reportErrorNewValue(MCRegister Register);
void reportError(SMLoc Loc, Twine const &Msg);
void reportNote(SMLoc Loc, Twine const &Msg);
void reportError(Twine const &Msg);
diff --git a/llvm/lib/Target/Hexagon/RDFCopy.cpp b/llvm/lib/Target/Hexagon/RDFCopy.cpp
index 3b1d3bd..4cab5da 100644
--- a/llvm/lib/Target/Hexagon/RDFCopy.cpp
+++ b/llvm/lib/Target/Hexagon/RDFCopy.cpp
@@ -26,7 +26,6 @@
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
-#include <utility>
using namespace llvm;
using namespace rdf;
diff --git a/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp b/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp
index cef77f1..0444c86 100644
--- a/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp
+++ b/llvm/lib/Target/Lanai/AsmParser/LanaiAsmParser.cpp
@@ -559,7 +559,7 @@ public:
OS << "Token: " << getToken() << "\n";
break;
case REGISTER:
- OS << "Reg: %r" << getReg() << "\n";
+ OS << "Reg: %r" << getReg().id() << "\n";
break;
case MEMORY_IMM:
OS << "MemImm: ";
@@ -567,14 +567,14 @@ public:
OS << '\n';
break;
case MEMORY_REG_IMM:
- OS << "MemRegImm: " << getMemBaseReg() << "+";
+ OS << "MemRegImm: " << getMemBaseReg().id() << "+";
MAI.printExpr(OS, *getMemOffset());
OS << '\n';
break;
case MEMORY_REG_REG:
assert(getMemOffset() == nullptr);
- OS << "MemRegReg: " << getMemBaseReg() << "+"
- << "%r" << getMemOffsetReg() << "\n";
+ OS << "MemRegReg: " << getMemBaseReg().id() << "+"
+ << "%r" << getMemOffsetReg().id() << "\n";
break;
}
}
diff --git a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
index b3d2856..14b7557 100644
--- a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
+++ b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
@@ -49,8 +49,7 @@ void LanaiInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
void LanaiInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator Position,
Register SourceRegister, bool IsKill, int FrameIndex,
- const TargetRegisterClass *RegisterClass,
- const TargetRegisterInfo * /*RegisterInfo*/, Register /*VReg*/,
+ const TargetRegisterClass *RegisterClass, Register /*VReg*/,
MachineInstr::MIFlag /*Flags*/) const {
DebugLoc DL;
if (Position != MBB.end()) {
@@ -70,8 +69,7 @@ void LanaiInstrInfo::storeRegToStackSlot(
void LanaiInstrInfo::loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator Position,
Register DestinationRegister, int FrameIndex,
- const TargetRegisterClass *RegisterClass,
- const TargetRegisterInfo * /*RegisterInfo*/, Register /*VReg*/,
+ const TargetRegisterClass *RegisterClass, Register /*VReg*/,
MachineInstr::MIFlag /*Flags*/) const {
DebugLoc DL;
if (Position != MBB.end()) {
diff --git a/llvm/lib/Target/Lanai/LanaiInstrInfo.h b/llvm/lib/Target/Lanai/LanaiInstrInfo.h
index d9827624..155e2f0 100644
--- a/llvm/lib/Target/Lanai/LanaiInstrInfo.h
+++ b/llvm/lib/Target/Lanai/LanaiInstrInfo.h
@@ -58,15 +58,13 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator Position,
Register SourceRegister, bool IsKill, int FrameIndex,
- const TargetRegisterClass *RegisterClass,
- const TargetRegisterInfo *RegisterInfo, Register VReg,
+ const TargetRegisterClass *RegisterClass, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator Position,
Register DestinationRegister, int FrameIndex,
- const TargetRegisterClass *RegisterClass,
- const TargetRegisterInfo *RegisterInfo, Register VReg,
+ const TargetRegisterClass *RegisterClass, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
diff --git a/llvm/lib/Target/LoongArch/LoongArchDeadRegisterDefinitions.cpp b/llvm/lib/Target/LoongArch/LoongArchDeadRegisterDefinitions.cpp
index 0ccebeb3..6358e348 100644
--- a/llvm/lib/Target/LoongArch/LoongArchDeadRegisterDefinitions.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchDeadRegisterDefinitions.cpp
@@ -60,7 +60,6 @@ bool LoongArchDeadRegisterDefinitions::runOnMachineFunction(
return false;
const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
- const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
LiveIntervals &LIS = getAnalysis<LiveIntervalsWrapperPass>().getLIS();
LLVM_DEBUG(dbgs() << "***** LoongArchDeadRegisterDefinitions *****\n");
@@ -86,7 +85,7 @@ bool LoongArchDeadRegisterDefinitions::runOnMachineFunction(
continue;
LLVM_DEBUG(dbgs() << " Dead def operand #" << I << " in:\n ";
MI.print(dbgs()));
- const TargetRegisterClass *RC = TII->getRegClass(Desc, I, TRI);
+ const TargetRegisterClass *RC = TII->getRegClass(Desc, I);
if (!(RC && RC->contains(LoongArch::R0))) {
LLVM_DEBUG(dbgs() << " Ignoring, register is not a GPR.\n");
continue;
diff --git a/llvm/lib/Target/LoongArch/LoongArchFrameLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchFrameLowering.cpp
index 1493bf4..690b063 100644
--- a/llvm/lib/Target/LoongArch/LoongArchFrameLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchFrameLowering.cpp
@@ -449,7 +449,7 @@ bool LoongArchFrameLowering::spillCalleeSavedRegisters(
bool IsKill =
!(Reg == LoongArch::R1 && MF->getFrameInfo().isReturnAddressTaken());
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.storeRegToStackSlot(MBB, MI, Reg, IsKill, CS.getFrameIdx(), RC, TRI,
+ TII.storeRegToStackSlot(MBB, MI, Reg, IsKill, CS.getFrameIdx(), RC,
Register());
}
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
index 5eb3bd6..9a33dcc 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
@@ -113,14 +113,14 @@ void LoongArchInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
void LoongArchInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register SrcReg,
bool IsKill, int FI, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
- MachineInstr::MIFlag Flags) const {
+
+ Register VReg, MachineInstr::MIFlag Flags) const {
MachineFunction *MF = MBB.getParent();
MachineFrameInfo &MFI = MF->getFrameInfo();
unsigned Opcode;
if (LoongArch::GPRRegClass.hasSubClassEq(RC))
- Opcode = TRI->getRegSizeInBits(LoongArch::GPRRegClass) == 32
+ Opcode = TRI.getRegSizeInBits(LoongArch::GPRRegClass) == 32
? LoongArch::ST_W
: LoongArch::ST_D;
else if (LoongArch::FPR32RegClass.hasSubClassEq(RC))
@@ -149,8 +149,8 @@ void LoongArchInstrInfo::storeRegToStackSlot(
void LoongArchInstrInfo::loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DstReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+ int FI, const TargetRegisterClass *RC, Register VReg,
+ MachineInstr::MIFlag Flags) const {
MachineFunction *MF = MBB.getParent();
MachineFrameInfo &MFI = MF->getFrameInfo();
DebugLoc DL;
@@ -159,7 +159,7 @@ void LoongArchInstrInfo::loadRegFromStackSlot(
unsigned Opcode;
if (LoongArch::GPRRegClass.hasSubClassEq(RC))
- Opcode = TRI->getRegSizeInBits(LoongArch::GPRRegClass) == 32
+ Opcode = RegInfo.getRegSizeInBits(LoongArch::GPRRegClass) == 32
? LoongArch::LD_W
: LoongArch::LD_D;
else if (LoongArch::FPR32RegClass.hasSubClassEq(RC))
@@ -378,9 +378,12 @@ bool LoongArchInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
}
}
-bool LoongArchInstrInfo::isSafeToMove(const MachineInstr &MI,
- const MachineBasicBlock *MBB,
- const MachineFunction &MF) const {
+bool LoongArchInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
+ const MachineBasicBlock *MBB,
+ const MachineFunction &MF) const {
+ if (TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF))
+ return true;
+
auto MII = MI.getIterator();
auto MIE = MBB->end();
@@ -426,25 +429,25 @@ bool LoongArchInstrInfo::isSafeToMove(const MachineInstr &MI,
auto MO2 = Lu32I->getOperand(2).getTargetFlags();
if (MO0 == LoongArchII::MO_PCREL_HI && MO1 == LoongArchII::MO_PCREL_LO &&
MO2 == LoongArchII::MO_PCREL64_LO)
- return false;
+ return true;
if ((MO0 == LoongArchII::MO_GOT_PC_HI || MO0 == LoongArchII::MO_LD_PC_HI ||
MO0 == LoongArchII::MO_GD_PC_HI) &&
MO1 == LoongArchII::MO_GOT_PC_LO && MO2 == LoongArchII::MO_GOT_PC64_LO)
- return false;
+ return true;
if (MO0 == LoongArchII::MO_IE_PC_HI && MO1 == LoongArchII::MO_IE_PC_LO &&
MO2 == LoongArchII::MO_IE_PC64_LO)
- return false;
+ return true;
if (MO0 == LoongArchII::MO_DESC_PC_HI &&
MO1 == LoongArchII::MO_DESC_PC_LO &&
MO2 == LoongArchII::MO_DESC64_PC_LO)
- return false;
+ return true;
break;
}
case LoongArch::LU52I_D: {
auto MO = MI.getOperand(2).getTargetFlags();
if (MO == LoongArchII::MO_PCREL64_HI || MO == LoongArchII::MO_GOT_PC64_HI ||
MO == LoongArchII::MO_IE_PC64_HI || MO == LoongArchII::MO_DESC64_PC_HI)
- return false;
+ return true;
break;
}
default:
@@ -484,7 +487,7 @@ bool LoongArchInstrInfo::isSafeToMove(const MachineInstr &MI,
auto MO1 = LoongArchII::getDirectFlags(SecondOp->getOperand(2));
auto MO2 = LoongArchII::getDirectFlags(Ld->getOperand(2));
if (MO1 == LoongArchII::MO_DESC_PC_LO && MO2 == LoongArchII::MO_DESC_LD)
- return false;
+ return true;
break;
}
if (SecondOp == MIE ||
@@ -493,34 +496,34 @@ bool LoongArchInstrInfo::isSafeToMove(const MachineInstr &MI,
auto MO1 = LoongArchII::getDirectFlags(SecondOp->getOperand(2));
if (MO0 == LoongArchII::MO_PCREL_HI && SecondOp->getOpcode() == AddiOp &&
MO1 == LoongArchII::MO_PCREL_LO)
- return false;
+ return true;
if (MO0 == LoongArchII::MO_GOT_PC_HI && SecondOp->getOpcode() == LdOp &&
MO1 == LoongArchII::MO_GOT_PC_LO)
- return false;
+ return true;
if ((MO0 == LoongArchII::MO_LD_PC_HI ||
MO0 == LoongArchII::MO_GD_PC_HI) &&
SecondOp->getOpcode() == AddiOp && MO1 == LoongArchII::MO_GOT_PC_LO)
- return false;
+ return true;
break;
}
case LoongArch::ADDI_W:
case LoongArch::ADDI_D: {
auto MO = LoongArchII::getDirectFlags(MI.getOperand(2));
if (MO == LoongArchII::MO_PCREL_LO || MO == LoongArchII::MO_GOT_PC_LO)
- return false;
+ return true;
break;
}
case LoongArch::LD_W:
case LoongArch::LD_D: {
auto MO = LoongArchII::getDirectFlags(MI.getOperand(2));
if (MO == LoongArchII::MO_GOT_PC_LO)
- return false;
+ return true;
break;
}
case LoongArch::PseudoDESC_CALL: {
auto MO = LoongArchII::getDirectFlags(MI.getOperand(2));
if (MO == LoongArchII::MO_DESC_CALL)
- return false;
+ return true;
break;
}
default:
@@ -528,18 +531,6 @@ bool LoongArchInstrInfo::isSafeToMove(const MachineInstr &MI,
}
}
- return true;
-}
-
-bool LoongArchInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
- const MachineBasicBlock *MBB,
- const MachineFunction &MF) const {
- if (TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF))
- return true;
-
- if (!isSafeToMove(MI, MBB, MF))
- return true;
-
return false;
}
@@ -665,13 +656,13 @@ void LoongArchInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
if (FrameIndex == -1)
report_fatal_error("The function size is incorrectly estimated.");
storeRegToStackSlot(MBB, PCALAU12I, Scav, /*IsKill=*/true, FrameIndex,
- &LoongArch::GPRRegClass, TRI, Register());
+ &LoongArch::GPRRegClass, Register());
TRI->eliminateFrameIndex(std::prev(PCALAU12I.getIterator()),
/*SpAdj=*/0, /*FIOperandNum=*/1);
PCALAU12I.getOperand(1).setMBB(&RestoreBB);
ADDI.getOperand(2).setMBB(&RestoreBB);
loadRegFromStackSlot(RestoreBB, RestoreBB.end(), Scav, FrameIndex,
- &LoongArch::GPRRegClass, TRI, Register());
+ &LoongArch::GPRRegClass, Register());
TRI->eliminateFrameIndex(RestoreBB.back(),
/*SpAdj=*/0, /*FIOperandNum=*/1);
}
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h
index d43d229..796ef9f 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h
@@ -40,13 +40,11 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
- bool IsKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool IsKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg,
- int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
// Materializes the given integer Val into DstReg.
@@ -68,9 +66,6 @@ public:
bool isBranchOffsetInRange(unsigned BranchOpc,
int64_t BrOffset) const override;
- bool isSafeToMove(const MachineInstr &MI, const MachineBasicBlock *MBB,
- const MachineFunction &MF) const override;
-
bool isSchedulingBoundary(const MachineInstr &MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const override;
diff --git a/llvm/lib/Target/M68k/M68kInstrInfo.cpp b/llvm/lib/Target/M68k/M68kInstrInfo.cpp
index c6be190b..91077ff 100644
--- a/llvm/lib/Target/M68k/M68kInstrInfo.cpp
+++ b/llvm/lib/Target/M68k/M68kInstrInfo.cpp
@@ -43,7 +43,7 @@ using namespace llvm;
void M68kInstrInfo::anchor() {}
M68kInstrInfo::M68kInstrInfo(const M68kSubtarget &STI)
- : M68kGenInstrInfo(STI, M68k::ADJCALLSTACKDOWN, M68k::ADJCALLSTACKUP, 0,
+ : M68kGenInstrInfo(STI, RI, M68k::ADJCALLSTACKDOWN, M68k::ADJCALLSTACKUP, 0,
M68k::RET),
Subtarget(STI), RI(STI) {}
@@ -838,15 +838,14 @@ bool M68kInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
void M68kInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool IsKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool IsKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags) const {
const MachineFrameInfo &MFI = MBB.getParent()->getFrameInfo();
- assert(MFI.getObjectSize(FrameIndex) >= TRI->getSpillSize(*RC) &&
+ assert(MFI.getObjectSize(FrameIndex) >= TRI.getSpillSize(*RC) &&
"Stack slot is too small to store");
(void)MFI;
- unsigned Opc = getStoreRegOpcode(SrcReg, RC, TRI, Subtarget);
+ unsigned Opc = getStoreRegOpcode(SrcReg, RC, &TRI, Subtarget);
DebugLoc DL = MBB.findDebugLoc(MI);
// (0,FrameIndex) <- $reg
M68k::addFrameReference(BuildMI(MBB, MI, DL, get(Opc)), FrameIndex)
@@ -857,15 +856,14 @@ void M68kInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
Register DstReg, int FrameIndex,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
const MachineFrameInfo &MFI = MBB.getParent()->getFrameInfo();
- assert(MFI.getObjectSize(FrameIndex) >= TRI->getSpillSize(*RC) &&
+ assert(MFI.getObjectSize(FrameIndex) >= TRI.getSpillSize(*RC) &&
"Stack slot is too small to load");
(void)MFI;
- unsigned Opc = getLoadRegOpcode(DstReg, RC, TRI, Subtarget);
+ unsigned Opc = getLoadRegOpcode(DstReg, RC, &TRI, Subtarget);
DebugLoc DL = MBB.findDebugLoc(MI);
M68k::addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DstReg), FrameIndex);
}
diff --git a/llvm/lib/Target/M68k/M68kInstrInfo.h b/llvm/lib/Target/M68k/M68kInstrInfo.h
index 97615d6..2b3789d 100644
--- a/llvm/lib/Target/M68k/M68kInstrInfo.h
+++ b/llvm/lib/Target/M68k/M68kInstrInfo.h
@@ -280,14 +280,12 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool IsKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool IsKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
diff --git a/llvm/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp b/llvm/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp
index a31c8ec..a8891d6 100644
--- a/llvm/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp
+++ b/llvm/lib/Target/MSP430/AsmParser/MSP430AsmParser.cpp
@@ -230,7 +230,7 @@ public:
O << "Token " << Tok;
break;
case k_Reg:
- O << "Register " << Reg;
+ O << "Register " << Reg.id();
break;
case k_Imm:
O << "Immediate ";
@@ -241,10 +241,10 @@ public:
MAI.printExpr(O, *Mem.Offset);
break;
case k_IndReg:
- O << "RegInd " << Reg;
+ O << "RegInd " << Reg.id();
break;
case k_PostIndReg:
- O << "PostInc " << Reg;
+ O << "PostInc " << Reg.id();
break;
}
}
diff --git a/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp b/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
index af053b8..0fb4e9d 100644
--- a/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
+++ b/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
@@ -32,8 +32,7 @@ MSP430InstrInfo::MSP430InstrInfo(const MSP430Subtarget &STI)
void MSP430InstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool isKill, int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIdx, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags) const {
DebugLoc DL;
if (MI != MBB.end()) DL = MI->getDebugLoc();
@@ -57,10 +56,12 @@ void MSP430InstrInfo::storeRegToStackSlot(
llvm_unreachable("Cannot store this register to stack slot!");
}
-void MSP430InstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIdx, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void MSP430InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ Register DestReg, int FrameIdx,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
DebugLoc DL;
if (MI != MBB.end()) DL = MI->getDebugLoc();
MachineFunction &MF = *MBB.getParent();
diff --git a/llvm/lib/Target/MSP430/MSP430InstrInfo.h b/llvm/lib/Target/MSP430/MSP430InstrInfo.h
index 316c136..c0a3984 100644
--- a/llvm/lib/Target/MSP430/MSP430InstrInfo.h
+++ b/llvm/lib/Target/MSP430/MSP430InstrInfo.h
@@ -42,13 +42,11 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ int FrameIdx, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
unsigned getInstSizeInBytes(const MachineInstr &MI) const override;
diff --git a/llvm/lib/Target/Mips/Mips16InstrInfo.cpp b/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
index 69b96cf..d23ec57 100644
--- a/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
+++ b/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
@@ -101,7 +101,6 @@ void Mips16InstrInfo::storeRegToStack(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
int64_t Offset,
MachineInstr::MIFlag Flags) const {
DebugLoc DL;
@@ -116,10 +115,12 @@ void Mips16InstrInfo::storeRegToStack(MachineBasicBlock &MBB,
.addMemOperand(MMO);
}
-void Mips16InstrInfo::loadRegFromStack(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- int64_t Offset, MachineInstr::MIFlag Flags) const {
+void Mips16InstrInfo::loadRegFromStack(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DestReg, int FI,
+ const TargetRegisterClass *RC,
+ int64_t Offset,
+ MachineInstr::MIFlag Flags) const {
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad);
diff --git a/llvm/lib/Target/Mips/Mips16InstrInfo.h b/llvm/lib/Target/Mips/Mips16InstrInfo.h
index 2834fd3..4300d08 100644
--- a/llvm/lib/Target/Mips/Mips16InstrInfo.h
+++ b/llvm/lib/Target/Mips/Mips16InstrInfo.h
@@ -56,13 +56,14 @@ public:
void storeRegToStack(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, int64_t Offset,
+ int64_t Offset,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStack(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, int64_t Offset,
+
+ int64_t Offset,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.h b/llvm/lib/Target/Mips/MipsInstrInfo.h
index fc94248..0b90972 100644
--- a/llvm/lib/Target/Mips/MipsInstrInfo.h
+++ b/llvm/lib/Target/Mips/MipsInstrInfo.h
@@ -147,31 +147,28 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override {
- storeRegToStack(MBB, MBBI, SrcReg, isKill, FrameIndex, RC, TRI, 0, Flags);
+ storeRegToStack(MBB, MBBI, SrcReg, isKill, FrameIndex, RC, 0, Flags);
}
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override {
- loadRegFromStack(MBB, MBBI, DestReg, FrameIndex, RC, TRI, 0, Flags);
+ loadRegFromStack(MBB, MBBI, DestReg, FrameIndex, RC, 0, Flags);
}
virtual void
storeRegToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
Register SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- int64_t Offset,
+ const TargetRegisterClass *RC, int64_t Offset,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const = 0;
virtual void loadRegFromStack(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, int64_t Offset,
+ int FrameIndex, const TargetRegisterClass *RC, int64_t Offset,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const = 0;
virtual void adjustStackPtr(unsigned SP, int64_t Amount,
diff --git a/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp b/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
index f08704a..942194c 100644
--- a/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
@@ -172,7 +172,7 @@ void ExpandPseudo::expandLoadCCond(MachineBasicBlock &MBB, Iter I) {
Register VR = MRI.createVirtualRegister(RC);
Register Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
- TII.loadRegFromStack(MBB, I, VR, FI, RC, &RegInfo, 0);
+ TII.loadRegFromStack(MBB, I, VR, FI, RC, 0);
BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), Dst)
.addReg(VR, RegState::Kill);
}
@@ -189,7 +189,7 @@ void ExpandPseudo::expandStoreCCond(MachineBasicBlock &MBB, Iter I) {
BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), VR)
.addReg(Src, getKillRegState(I->getOperand(0).isKill()));
- TII.storeRegToStack(MBB, I, VR, true, FI, RC, &RegInfo, 0);
+ TII.storeRegToStack(MBB, I, VR, true, FI, RC, 0);
}
void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
@@ -210,9 +210,9 @@ void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
DebugLoc DL = I->getDebugLoc();
const MCInstrDesc &Desc = TII.get(TargetOpcode::COPY);
- TII.loadRegFromStack(MBB, I, VR0, FI, RC, &RegInfo, 0);
+ TII.loadRegFromStack(MBB, I, VR0, FI, RC, 0);
BuildMI(MBB, I, DL, Desc, Lo).addReg(VR0, RegState::Kill);
- TII.loadRegFromStack(MBB, I, VR1, FI, RC, &RegInfo, RegSize);
+ TII.loadRegFromStack(MBB, I, VR1, FI, RC, RegSize);
BuildMI(MBB, I, DL, Desc, Hi).addReg(VR1, RegState::Kill);
}
@@ -234,9 +234,9 @@ void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I,
DebugLoc DL = I->getDebugLoc();
BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
- TII.storeRegToStack(MBB, I, VR0, true, FI, RC, &RegInfo, 0);
+ TII.storeRegToStack(MBB, I, VR0, true, FI, RC, 0);
BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
- TII.storeRegToStack(MBB, I, VR1, true, FI, RC, &RegInfo, RegSize);
+ TII.storeRegToStack(MBB, I, VR1, true, FI, RC, RegSize);
}
bool ExpandPseudo::expandCopy(MachineBasicBlock &MBB, Iter I) {
@@ -321,11 +321,9 @@ bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB,
int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(MF, RC2);
if (!Subtarget.isLittle())
std::swap(LoReg, HiReg);
- TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC,
- &RegInfo, 0);
- TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC,
- &RegInfo, 4);
- TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, 0);
+ TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC, 0);
+ TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC, 4);
+ TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, 0);
return true;
}
@@ -385,8 +383,8 @@ bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
// We re-use the same spill slot each time so that the stack frame doesn't
// grow too much in functions with a large number of moves.
int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(MF, RC);
- TII.storeRegToStack(MBB, I, SrcReg, Op1.isKill(), FI, RC, &RegInfo, 0);
- TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, Offset);
+ TII.storeRegToStack(MBB, I, SrcReg, Op1.isKill(), FI, RC, 0);
+ TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, Offset);
return true;
}
@@ -480,8 +478,7 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF,
if (!MBB.isLiveIn(ABI.GetEhDataReg(I)))
MBB.addLiveIn(ABI.GetEhDataReg(I));
TII.storeRegToStackSlot(MBB, MBBI, ABI.GetEhDataReg(I), false,
- MipsFI->getEhDataRegFI(I), RC, &RegInfo,
- Register());
+ MipsFI->getEhDataRegFI(I), RC, Register());
}
// Emit .cfi_offset directives for eh data registers.
@@ -579,8 +576,7 @@ void MipsSEFrameLowering::emitInterruptPrologueStub(
.setMIFlag(MachineInstr::FrameSetup);
STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
- MipsFI->getISRRegFI(0), PtrRC,
- STI.getRegisterInfo(), 0);
+ MipsFI->getISRRegFI(0), PtrRC, 0);
// Fetch and Spill Status
MBB.addLiveIn(Mips::COP012);
@@ -590,8 +586,7 @@ void MipsSEFrameLowering::emitInterruptPrologueStub(
.setMIFlag(MachineInstr::FrameSetup);
STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
- MipsFI->getISRRegFI(1), PtrRC,
- STI.getRegisterInfo(), 0);
+ MipsFI->getISRRegFI(1), PtrRC, 0);
// Build the configuration for disabling lower priority interrupts. Non EIC
// interrupts need to be masked off with zero, EIC from the Cause register.
@@ -657,7 +652,6 @@ void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
const MipsSEInstrInfo &TII =
*static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
- const MipsRegisterInfo &RegInfo = *STI.getRegisterInfo();
DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
MipsABIInfo ABI = STI.getABI();
@@ -690,8 +684,7 @@ void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
// Insert instructions that restore eh data registers.
for (int J = 0; J < 4; ++J) {
TII.loadRegFromStackSlot(MBB, I, ABI.GetEhDataReg(J),
- MipsFI->getEhDataRegFI(J), RC, &RegInfo,
- Register());
+ MipsFI->getEhDataRegFI(J), RC, Register());
}
}
@@ -722,17 +715,15 @@ void MipsSEFrameLowering::emitInterruptEpilogueStub(
BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EHB));
// Restore EPC
- STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
- MipsFI->getISRRegFI(0), PtrRC,
- STI.getRegisterInfo(), Register());
+ STI.getInstrInfo()->loadRegFromStackSlot(
+ MBB, MBBI, Mips::K1, MipsFI->getISRRegFI(0), PtrRC, Register());
BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP014)
.addReg(Mips::K1)
.addImm(0);
// Restore Status
- STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
- MipsFI->getISRRegFI(1), PtrRC,
- STI.getRegisterInfo(), Register());
+ STI.getInstrInfo()->loadRegFromStackSlot(
+ MBB, MBBI, Mips::K1, MipsFI->getISRRegFI(1), PtrRC, Register());
BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
.addReg(Mips::K1)
.addImm(0);
@@ -795,7 +786,7 @@ bool MipsSEFrameLowering::spillCalleeSavedRegisters(
// Insert the spill to the stack frame.
bool IsKill = !IsRAAndRetAddrIsTaken;
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.storeRegToStackSlot(MBB, MI, Reg, IsKill, I.getFrameIdx(), RC, TRI,
+ TII.storeRegToStackSlot(MBB, MI, Reg, IsKill, I.getFrameIdx(), RC,
Register());
}
diff --git a/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp b/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp
index 517f489..a1d0aa0 100644
--- a/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp
@@ -209,7 +209,6 @@ void MipsSEInstrInfo::storeRegToStack(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
int64_t Offset,
MachineInstr::MIFlag Flags) const {
DebugLoc DL;
@@ -235,16 +234,16 @@ void MipsSEInstrInfo::storeRegToStack(MachineBasicBlock &MBB,
Opc = Mips::SDC1;
else if (Mips::FGR64RegClass.hasSubClassEq(RC))
Opc = Mips::SDC164;
- else if (TRI->isTypeLegalForClass(*RC, MVT::v16i8))
+ else if (RI.isTypeLegalForClass(*RC, MVT::v16i8))
Opc = Mips::ST_B;
- else if (TRI->isTypeLegalForClass(*RC, MVT::v8i16) ||
- TRI->isTypeLegalForClass(*RC, MVT::v8f16))
+ else if (RI.isTypeLegalForClass(*RC, MVT::v8i16) ||
+ RI.isTypeLegalForClass(*RC, MVT::v8f16))
Opc = Mips::ST_H;
- else if (TRI->isTypeLegalForClass(*RC, MVT::v4i32) ||
- TRI->isTypeLegalForClass(*RC, MVT::v4f32))
+ else if (RI.isTypeLegalForClass(*RC, MVT::v4i32) ||
+ RI.isTypeLegalForClass(*RC, MVT::v4f32))
Opc = Mips::ST_W;
- else if (TRI->isTypeLegalForClass(*RC, MVT::v2i64) ||
- TRI->isTypeLegalForClass(*RC, MVT::v2f64))
+ else if (RI.isTypeLegalForClass(*RC, MVT::v2i64) ||
+ RI.isTypeLegalForClass(*RC, MVT::v2f64))
Opc = Mips::ST_D;
else if (Mips::LO32RegClass.hasSubClassEq(RC))
Opc = Mips::SW;
@@ -281,10 +280,12 @@ void MipsSEInstrInfo::storeRegToStack(MachineBasicBlock &MBB,
.addFrameIndex(FI).addImm(Offset).addMemOperand(MMO);
}
-void MipsSEInstrInfo::loadRegFromStack(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- int64_t Offset, MachineInstr::MIFlag Flags) const {
+void MipsSEInstrInfo::loadRegFromStack(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DestReg, int FI,
+ const TargetRegisterClass *RC,
+ int64_t Offset,
+ MachineInstr::MIFlag Flags) const {
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad);
@@ -313,16 +314,16 @@ void MipsSEInstrInfo::loadRegFromStack(
Opc = Mips::LDC1;
else if (Mips::FGR64RegClass.hasSubClassEq(RC))
Opc = Mips::LDC164;
- else if (TRI->isTypeLegalForClass(*RC, MVT::v16i8))
+ else if (RI.isTypeLegalForClass(*RC, MVT::v16i8))
Opc = Mips::LD_B;
- else if (TRI->isTypeLegalForClass(*RC, MVT::v8i16) ||
- TRI->isTypeLegalForClass(*RC, MVT::v8f16))
+ else if (RI.isTypeLegalForClass(*RC, MVT::v8i16) ||
+ RI.isTypeLegalForClass(*RC, MVT::v8f16))
Opc = Mips::LD_H;
- else if (TRI->isTypeLegalForClass(*RC, MVT::v4i32) ||
- TRI->isTypeLegalForClass(*RC, MVT::v4f32))
+ else if (RI.isTypeLegalForClass(*RC, MVT::v4i32) ||
+ RI.isTypeLegalForClass(*RC, MVT::v4f32))
Opc = Mips::LD_W;
- else if (TRI->isTypeLegalForClass(*RC, MVT::v2i64) ||
- TRI->isTypeLegalForClass(*RC, MVT::v2f64))
+ else if (RI.isTypeLegalForClass(*RC, MVT::v2i64) ||
+ RI.isTypeLegalForClass(*RC, MVT::v2f64))
Opc = Mips::LD_D;
else if (Mips::HI32RegClass.hasSubClassEq(RC))
Opc = Mips::LW;
@@ -678,8 +679,8 @@ MipsSEInstrInfo::compareOpndSize(unsigned Opc,
const MCInstrDesc &Desc = get(Opc);
assert(Desc.NumOperands == 2 && "Unary instruction expected.");
const MipsRegisterInfo *RI = &getRegisterInfo();
- unsigned DstRegSize = RI->getRegSizeInBits(*getRegClass(Desc, 0, RI));
- unsigned SrcRegSize = RI->getRegSizeInBits(*getRegClass(Desc, 1, RI));
+ unsigned DstRegSize = RI->getRegSizeInBits(*getRegClass(Desc, 0));
+ unsigned SrcRegSize = RI->getRegSizeInBits(*getRegClass(Desc, 1));
return std::make_pair(DstRegSize > SrcRegSize, DstRegSize < SrcRegSize);
}
diff --git a/llvm/lib/Target/Mips/MipsSEInstrInfo.h b/llvm/lib/Target/Mips/MipsSEInstrInfo.h
index 0a7a0e5..5c48ccd 100644
--- a/llvm/lib/Target/Mips/MipsSEInstrInfo.h
+++ b/llvm/lib/Target/Mips/MipsSEInstrInfo.h
@@ -50,13 +50,12 @@ public:
void storeRegToStack(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, int64_t Offset,
+ int64_t Offset,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStack(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, int64_t Offset,
+ int FrameIndex, const TargetRegisterClass *RC, int64_t Offset,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
diff --git a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index e1b37fd..9bbb3aa 100644
--- a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -89,7 +89,6 @@
#include <cstdint>
#include <cstring>
#include <string>
-#include <utility>
using namespace llvm;
diff --git a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
index 910bc9d..aae3e49 100644
--- a/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -2520,11 +2520,11 @@ bool PPCFrameLowering::spillCalleeSavedRegisters(
// saved vector registers.
if (Subtarget.needsSwapsForVSXMemOps() &&
!MF->getFunction().hasFnAttribute(Attribute::NoUnwind))
- TII.storeRegToStackSlotNoUpd(MBB, MI, Reg, !IsLiveIn,
- I.getFrameIdx(), RC, TRI);
+ TII.storeRegToStackSlotNoUpd(MBB, MI, Reg, !IsLiveIn, I.getFrameIdx(),
+ RC);
else
TII.storeRegToStackSlot(MBB, MI, Reg, !IsLiveIn, I.getFrameIdx(), RC,
- TRI, Register());
+ Register());
}
}
}
@@ -2690,10 +2690,9 @@ bool PPCFrameLowering::restoreCalleeSavedRegisters(
// saved vector registers.
if (Subtarget.needsSwapsForVSXMemOps() &&
!MF->getFunction().hasFnAttribute(Attribute::NoUnwind))
- TII.loadRegFromStackSlotNoUpd(MBB, I, Reg, CSI[i].getFrameIdx(), RC,
- TRI);
+ TII.loadRegFromStackSlotNoUpd(MBB, I, Reg, CSI[i].getFrameIdx(), RC);
else
- TII.loadRegFromStackSlot(MBB, I, Reg, CSI[i].getFrameIdx(), RC, TRI,
+ TII.loadRegFromStackSlot(MBB, I, Reg, CSI[i].getFrameIdx(), RC,
Register());
assert(I != MBB.begin() &&
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index 8d9d4c7..366a7b6 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -2014,8 +2014,7 @@ void PPCInstrInfo::StoreRegToStackSlot(
void PPCInstrInfo::storeRegToStackSlotNoUpd(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned SrcReg,
- bool isKill, int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
+ bool isKill, int FrameIdx, const TargetRegisterClass *RC) const {
MachineFunction &MF = *MBB.getParent();
SmallVector<MachineInstr *, 4> NewMIs;
@@ -2034,8 +2033,7 @@ void PPCInstrInfo::storeRegToStackSlotNoUpd(
void PPCInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool isKill, int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIdx, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags) const {
// We need to avoid a situation in which the value from a VRRC register is
// spilled using an Altivec instruction and reloaded into a VSRC register
@@ -2045,7 +2043,7 @@ void PPCInstrInfo::storeRegToStackSlot(
// the register is defined using an Altivec instruction and is then used by a
// VSX instruction.
RC = updatedRC(RC);
- storeRegToStackSlotNoUpd(MBB, MI, SrcReg, isKill, FrameIdx, RC, TRI);
+ storeRegToStackSlotNoUpd(MBB, MI, SrcReg, isKill, FrameIdx, RC);
}
void PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, const DebugLoc &DL,
@@ -2060,8 +2058,7 @@ void PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, const DebugLoc &DL,
void PPCInstrInfo::loadRegFromStackSlotNoUpd(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned DestReg,
- int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const {
+ int FrameIdx, const TargetRegisterClass *RC) const {
MachineFunction &MF = *MBB.getParent();
SmallVector<MachineInstr*, 4> NewMIs;
DebugLoc DL;
@@ -2080,10 +2077,12 @@ void PPCInstrInfo::loadRegFromStackSlotNoUpd(
NewMIs.back()->addMemOperand(MF, MMO);
}
-void PPCInstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIdx, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void PPCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ Register DestReg, int FrameIdx,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
// We need to avoid a situation in which the value from a VRRC register is
// spilled using an Altivec instruction and reloaded into a VSRC register
// using a VSX instruction. The issue with this is that the VSX
@@ -2093,7 +2092,7 @@ void PPCInstrInfo::loadRegFromStackSlot(
// VSX instruction.
RC = updatedRC(RC);
- loadRegFromStackSlotNoUpd(MBB, MI, DestReg, FrameIdx, RC, TRI);
+ loadRegFromStackSlotNoUpd(MBB, MI, DestReg, FrameIdx, RC);
}
bool PPCInstrInfo::
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.h b/llvm/lib/Target/PowerPC/PPCInstrInfo.h
index d67fc28..8b824bc 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.h
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.h
@@ -570,7 +570,8 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
// Emits a register spill without updating the register class for vector
@@ -579,13 +580,13 @@ public:
void storeRegToStackSlotNoUpd(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterClass *RC) const;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
// Emits a register reload without updating the register class for vector
@@ -594,8 +595,7 @@ public:
void loadRegFromStackSlotNoUpd(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const;
+ const TargetRegisterClass *RC) const;
unsigned getStoreOpcodeForSpill(const TargetRegisterClass *RC) const;
diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 85b4072..b3a7c82 100644
--- a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -2023,7 +2023,7 @@ Register PPCRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
const TargetRegisterClass *RC = getPointerRegClass();
Register BaseReg = MRI.createVirtualRegister(RC);
- MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this));
+ MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0));
BuildMI(*MBB, Ins, DL, MCID, BaseReg)
.addFrameIndex(FrameIdx).addImm(Offset);
@@ -2051,7 +2051,7 @@ void PPCRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
const MCInstrDesc &MCID = MI.getDesc();
MachineRegisterInfo &MRI = MF.getRegInfo();
- MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, FIOperandNum, this));
+ MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, FIOperandNum));
}
bool PPCRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVFixupKinds.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVFixupKinds.h
index 98c8738..a2b75e4 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVFixupKinds.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVFixupKinds.h
@@ -11,7 +11,6 @@
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCFixup.h"
-#include <utility>
#undef RISCV
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
index 26f434b..cedaa86 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
@@ -79,6 +79,32 @@ static void generateInstSeqImpl(int64_t Val, const MCSubtargetInfo &STI,
}
}
+ if (STI.hasFeature(RISCV::FeatureStdExtP)) {
+    // Check if Val is a splat of packed 8-bit (PLI_B) or 10-bit (PLI_H/PLI_W)
+    // elements, so a single pack-load-immediate can materialize it.
+ int32_t Bit63To32 = Val >> 32;
+ int32_t Bit31To0 = Val;
+ int16_t Bit31To16 = Bit31To0 >> 16;
+ int16_t Bit15To0 = Bit31To0;
+ int8_t Bit15To8 = Bit15To0 >> 8;
+ int8_t Bit7To0 = Bit15To0;
+ if (Bit63To32 == Bit31To0) {
+ if (IsRV64 && isInt<10>(Bit63To32)) {
+ Res.emplace_back(RISCV::PLI_W, Bit63To32);
+ return;
+ }
+ if (Bit31To16 == Bit15To0) {
+ if (isInt<10>(Bit31To16)) {
+ Res.emplace_back(RISCV::PLI_H, Bit31To16);
+ return;
+ }
+ if (Bit15To8 == Bit7To0) {
+ Res.emplace_back(RISCV::PLI_B, Bit15To8);
+ return;
+ }
+ }
+ }
+ }
+
if (isInt<32>(Val)) {
// Depending on the active bits in the immediate Value v, the following
// instruction sequences are emitted:
@@ -562,6 +588,9 @@ OpndKind Inst::getOpndKind() const {
case RISCV::LUI:
case RISCV::QC_LI:
case RISCV::QC_E_LI:
+ case RISCV::PLI_B:
+ case RISCV::PLI_H:
+ case RISCV::PLI_W:
return RISCVMatInt::Imm;
case RISCV::ADD_UW:
return RISCVMatInt::RegX0;
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h
index a82cd65..5df8edb 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h
@@ -21,7 +21,7 @@ namespace RISCVMatInt {
enum OpndKind {
RegImm, // ADDI/ADDIW/XORI/SLLI/SRLI/SLLI_UW/RORI/BSETI/BCLRI/TH_SRRI
- Imm, // LUI/QC_LI/QC_E_LI
+ Imm, // LUI/QC_LI/QC_E_LI/PLI_B/PLI_H/PLI_W
RegReg, // SH1ADD/SH2ADD/SH3ADD/PACK
RegX0, // ADD_UW
};
diff --git a/llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp b/llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp
index 51180f5..5d3d9b5 100644
--- a/llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp
+++ b/llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp
@@ -59,7 +59,6 @@ bool RISCVDeadRegisterDefinitions::runOnMachineFunction(MachineFunction &MF) {
return false;
const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
- const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
LiveIntervals &LIS = getAnalysis<LiveIntervalsWrapperPass>().getLIS();
LLVM_DEBUG(dbgs() << "***** RISCVDeadRegisterDefinitions *****\n");
@@ -89,7 +88,7 @@ bool RISCVDeadRegisterDefinitions::runOnMachineFunction(MachineFunction &MF) {
LLVM_DEBUG(dbgs() << " Dead def operand #" << I << " in:\n ";
MI.print(dbgs()));
Register X0Reg;
- const TargetRegisterClass *RC = TII->getRegClass(Desc, I, TRI);
+ const TargetRegisterClass *RC = TII->getRegClass(Desc, I);
if (RC && RC->contains(RISCV::X0)) {
X0Reg = RISCV::X0;
} else if (RC && RC->contains(RISCV::X0_W)) {
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index f881c4c..f7fc952 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -291,12 +291,12 @@ static void emitSiFiveCLICPreemptibleSaves(MachineFunction &MF,
// which affects other passes.
TII->storeRegToStackSlot(MBB, MBBI, RISCV::X8, /* IsKill=*/true,
RVFI->getInterruptCSRFrameIndex(0),
- &RISCV::GPRRegClass, STI.getRegisterInfo(),
- Register(), MachineInstr::FrameSetup);
+ &RISCV::GPRRegClass, Register(),
+ MachineInstr::FrameSetup);
TII->storeRegToStackSlot(MBB, MBBI, RISCV::X9, /* IsKill=*/true,
RVFI->getInterruptCSRFrameIndex(1),
- &RISCV::GPRRegClass, STI.getRegisterInfo(),
- Register(), MachineInstr::FrameSetup);
+ &RISCV::GPRRegClass, Register(),
+ MachineInstr::FrameSetup);
// Put `mcause` into X8 (s0), and `mepc` into X9 (s1). If either of these are
// used in the function, then they will appear in `getUnmanagedCSI` and will
@@ -357,14 +357,12 @@ static void emitSiFiveCLICPreemptibleRestores(MachineFunction &MF,
// X8 and X9 need to be restored to their values on function entry, which we
// saved onto the stack in `emitSiFiveCLICPreemptibleSaves`.
- TII->loadRegFromStackSlot(MBB, MBBI, RISCV::X9,
- RVFI->getInterruptCSRFrameIndex(1),
- &RISCV::GPRRegClass, STI.getRegisterInfo(),
- Register(), MachineInstr::FrameSetup);
- TII->loadRegFromStackSlot(MBB, MBBI, RISCV::X8,
- RVFI->getInterruptCSRFrameIndex(0),
- &RISCV::GPRRegClass, STI.getRegisterInfo(),
- Register(), MachineInstr::FrameSetup);
+ TII->loadRegFromStackSlot(
+ MBB, MBBI, RISCV::X9, RVFI->getInterruptCSRFrameIndex(1),
+ &RISCV::GPRRegClass, Register(), MachineInstr::FrameSetup);
+ TII->loadRegFromStackSlot(
+ MBB, MBBI, RISCV::X8, RVFI->getInterruptCSRFrameIndex(0),
+ &RISCV::GPRRegClass, Register(), MachineInstr::FrameSetup);
}
// Get the ID of the libcall used for spilling and restoring callee saved
@@ -2177,7 +2175,7 @@ bool RISCVFrameLowering::spillCalleeSavedRegisters(
MCRegister Reg = CS.getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
TII.storeRegToStackSlot(MBB, MI, Reg, !MBB.isLiveIn(Reg),
- CS.getFrameIdx(), RC, TRI, Register(),
+ CS.getFrameIdx(), RC, Register(),
MachineInstr::FrameSetup);
}
};
@@ -2267,8 +2265,8 @@ bool RISCVFrameLowering::restoreCalleeSavedRegisters(
for (auto &CS : CSInfo) {
MCRegister Reg = CS.getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.loadRegFromStackSlot(MBB, MI, Reg, CS.getFrameIdx(), RC, TRI,
- Register(), MachineInstr::FrameDestroy);
+ TII.loadRegFromStackSlot(MBB, MI, Reg, CS.getFrameIdx(), RC, Register(),
+ MachineInstr::FrameDestroy);
assert(MI != MBB.begin() &&
"loadRegFromStackSlot didn't insert any code!");
}
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 9078335..1cbedb7 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -991,6 +991,18 @@ static unsigned getSegInstNF(unsigned Intrinsic) {
}
}
+static bool isApplicableToPLI(int Val) {
+  // Return true if Val is a splat of packed 8-bit or 10-bit elements,
+  // i.e. it can be materialized with a single PLI_B/PLI_H.
+ int16_t Bit31To16 = Val >> 16;
+ int16_t Bit15To0 = Val;
+ int8_t Bit15To8 = Bit15To0 >> 8;
+ int8_t Bit7To0 = Val;
+ if (Bit31To16 != Bit15To0)
+ return false;
+
+ return isInt<10>(Bit31To16) || Bit15To8 == Bit7To0;
+}
+
void RISCVDAGToDAGISel::Select(SDNode *Node) {
// If we have a custom node, we have already selected.
if (Node->isMachineOpcode()) {
@@ -1034,6 +1046,14 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
Imm = SignExtend64<32>(Imm);
+ if (Subtarget->enablePExtCodeGen() && isApplicableToPLI(Imm) &&
+ hasAllWUsers(Node)) {
+ // If it's 4 packed 8-bit integers or 2 packed signed 16-bit integers, we
+ // can simply copy lower 32 bits to higher 32 bits to make it able to
+ // rematerialize to PLI_B or PLI_H
+ Imm = ((uint64_t)Imm << 32) | (Imm & 0xFFFFFFFF);
+ }
+
ReplaceNode(Node, selectImm(CurDAG, DL, VT, Imm, *Subtarget).getNode());
return;
}
@@ -2654,6 +2674,21 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
CurDAG->RemoveDeadNode(Node);
return;
}
+ if (Subtarget->enablePExtCodeGen()) {
+ bool Is32BitCast =
+ (VT == MVT::i32 && (SrcVT == MVT::v4i8 || SrcVT == MVT::v2i16)) ||
+ (SrcVT == MVT::i32 && (VT == MVT::v4i8 || VT == MVT::v2i16));
+ bool Is64BitCast =
+ (VT == MVT::i64 && (SrcVT == MVT::v8i8 || SrcVT == MVT::v4i16 ||
+ SrcVT == MVT::v2i32)) ||
+ (SrcVT == MVT::i64 &&
+ (VT == MVT::v8i8 || VT == MVT::v4i16 || VT == MVT::v2i32));
+ if (Is32BitCast || Is64BitCast) {
+ ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
+ CurDAG->RemoveDeadNode(Node);
+ return;
+ }
+ }
break;
}
case ISD::INSERT_SUBVECTOR:
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a3ccbd8..637f194 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -284,6 +284,18 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
addRegisterClass(MVT::riscv_nxv32i8x2, &RISCV::VRN2M4RegClass);
}
+  // Fixed-length vectors are stored in GPRs for P extension packed operations.
+ if (Subtarget.enablePExtCodeGen()) {
+ if (Subtarget.is64Bit()) {
+ addRegisterClass(MVT::v2i32, &RISCV::GPRRegClass);
+ addRegisterClass(MVT::v4i16, &RISCV::GPRRegClass);
+ addRegisterClass(MVT::v8i8, &RISCV::GPRRegClass);
+ } else {
+ addRegisterClass(MVT::v2i16, &RISCV::GPRRegClass);
+ addRegisterClass(MVT::v4i8, &RISCV::GPRRegClass);
+ }
+ }
+
// Compute derived properties from the register classes.
computeRegisterProperties(STI.getRegisterInfo());
@@ -492,6 +504,34 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
ISD::FTRUNC, ISD::FRINT, ISD::FROUND,
ISD::FROUNDEVEN, ISD::FCANONICALIZE};
+ if (Subtarget.enablePExtCodeGen()) {
+ setTargetDAGCombine(ISD::TRUNCATE);
+ setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
+ setTruncStoreAction(MVT::v4i16, MVT::v4i8, Expand);
+ SmallVector<MVT, 2> VTs;
+ if (Subtarget.is64Bit()) {
+ VTs.append({MVT::v2i32, MVT::v4i16, MVT::v8i8});
+ setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);
+ setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
+ setTruncStoreAction(MVT::v8i16, MVT::v8i8, Expand);
+ setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
+ setTruncStoreAction(MVT::v4i16, MVT::v4i8, Expand);
+ setOperationAction(ISD::LOAD, MVT::v2i16, Custom);
+ setOperationAction(ISD::LOAD, MVT::v4i8, Custom);
+ } else {
+ VTs.append({MVT::v2i16, MVT::v4i8});
+ }
+ setOperationAction(ISD::UADDSAT, VTs, Legal);
+ setOperationAction(ISD::SADDSAT, VTs, Legal);
+ setOperationAction(ISD::USUBSAT, VTs, Legal);
+ setOperationAction(ISD::SSUBSAT, VTs, Legal);
+ setOperationAction({ISD::AVGFLOORS, ISD::AVGFLOORU}, VTs, Legal);
+ setOperationAction({ISD::ABDS, ISD::ABDU}, VTs, Legal);
+ setOperationAction(ISD::BUILD_VECTOR, VTs, Custom);
+ setOperationAction(ISD::BITCAST, VTs, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, VTs, Custom);
+ }
+
if (Subtarget.hasStdExtZfbfmin()) {
setOperationAction(ISD::BITCAST, MVT::i16, Custom);
setOperationAction(ISD::ConstantFP, MVT::bf16, Expand);
@@ -1776,6 +1816,15 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
MaxLoadsPerMemcmp = Subtarget.getMaxLoadsPerMemcmp(/*OptSize=*/false);
}
+TargetLoweringBase::LegalizeTypeAction
+RISCVTargetLowering::getPreferredVectorAction(MVT VT) const {
+ if (Subtarget.is64Bit() && Subtarget.enablePExtCodeGen())
+ if (VT == MVT::v2i16 || VT == MVT::v4i8)
+ return TypeWidenVector;
+
+ return TargetLoweringBase::getPreferredVectorAction(VT);
+}
+
EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
LLVMContext &Context,
EVT VT) const {
@@ -4391,6 +4440,37 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
MVT XLenVT = Subtarget.getXLenVT();
SDLoc DL(Op);
+ // Handle P extension packed vector BUILD_VECTOR with PLI for splat constants
+ if (Subtarget.enablePExtCodeGen()) {
+ bool IsPExtVector =
+ (VT == MVT::v2i16 || VT == MVT::v4i8) ||
+ (Subtarget.is64Bit() &&
+ (VT == MVT::v4i16 || VT == MVT::v8i8 || VT == MVT::v2i32));
+ if (IsPExtVector) {
+ if (SDValue SplatValue = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
+ if (auto *C = dyn_cast<ConstantSDNode>(SplatValue)) {
+ int64_t SplatImm = C->getSExtValue();
+ bool IsValidImm = false;
+
+ // Check immediate range based on vector type
+ if (VT == MVT::v8i8 || VT == MVT::v4i8) {
+          // PLI_B uses an 8-bit signed or unsigned immediate
+ IsValidImm = isUInt<8>(SplatImm) || isInt<8>(SplatImm);
+ if (isUInt<8>(SplatImm))
+ SplatImm = (int8_t)SplatImm;
+ } else {
+ // PLI_H and PLI_W use 10-bit signed immediate
+ IsValidImm = isInt<10>(SplatImm);
+ }
+
+ if (IsValidImm) {
+ SDValue Imm = DAG.getSignedTargetConstant(SplatImm, DL, XLenVT);
+ return DAG.getNode(RISCVISD::PLI, DL, VT, Imm);
+ }
+ }
+ }
+ }
+ }
// Proper support for f16 requires Zvfh. bf16 always requires special
// handling. We need to cast the scalar to integer and create an integer
@@ -7546,6 +7626,19 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
}
+ if (Subtarget.enablePExtCodeGen()) {
+ bool Is32BitCast =
+ (VT == MVT::i32 && (Op0VT == MVT::v4i8 || Op0VT == MVT::v2i16)) ||
+ (Op0VT == MVT::i32 && (VT == MVT::v4i8 || VT == MVT::v2i16));
+ bool Is64BitCast =
+ (VT == MVT::i64 && (Op0VT == MVT::v8i8 || Op0VT == MVT::v4i16 ||
+ Op0VT == MVT::v2i32)) ||
+ (Op0VT == MVT::i64 &&
+ (VT == MVT::v8i8 || VT == MVT::v4i16 || VT == MVT::v2i32));
+ if (Is32BitCast || Is64BitCast)
+ return Op;
+ }
+
// Consider other scalar<->scalar casts as legal if the types are legal.
// Otherwise expand them.
if (!VT.isVector() && !Op0VT.isVector()) {
@@ -8218,6 +8311,17 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
auto *Store = cast<StoreSDNode>(Op);
SDValue StoredVal = Store->getValue();
EVT VT = StoredVal.getValueType();
+ if (Subtarget.enablePExtCodeGen()) {
+ if (VT == MVT::v2i16 || VT == MVT::v4i8) {
+ SDValue DL(Op);
+ SDValue Cast = DAG.getBitcast(MVT::i32, StoredVal);
+ SDValue NewStore =
+ DAG.getStore(Store->getChain(), DL, Cast, Store->getBasePtr(),
+ Store->getPointerInfo(), Store->getBaseAlign(),
+ Store->getMemOperand()->getFlags());
+ return NewStore;
+ }
+ }
if (VT == MVT::f64) {
assert(Subtarget.hasStdExtZdinx() && !Subtarget.hasStdExtZilsd() &&
!Subtarget.is64Bit() && "Unexpected custom legalisation");
@@ -10500,6 +10604,17 @@ SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
return DAG.getNode(RISCVISD::FMV_H_X, DL, EltVT, IntExtract);
}
+ if (Subtarget.enablePExtCodeGen() && VecVT.isFixedLengthVector()) {
+ if (VecVT != MVT::v4i16 && VecVT != MVT::v2i16 && VecVT != MVT::v8i8 &&
+ VecVT != MVT::v4i8 && VecVT != MVT::v2i32)
+ return SDValue();
+ SDValue Extracted = DAG.getBitcast(XLenVT, Vec);
+ unsigned ElemWidth = EltVT.getSizeInBits();
+ SDValue Shamt = DAG.getNode(ISD::MUL, DL, XLenVT, Idx,
+ DAG.getConstant(ElemWidth, DL, XLenVT));
+ return DAG.getNode(ISD::SRL, DL, XLenVT, Extracted, Shamt);
+ }
+
// If this is a fixed vector, we need to convert it to a scalable vector.
MVT ContainerVT = VecVT;
if (VecVT.isFixedLengthVector()) {
@@ -14642,6 +14757,21 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
return;
}
+ if (Subtarget.is64Bit() && Subtarget.enablePExtCodeGen()) {
+ SDLoc DL(N);
+ SDValue ExtLoad =
+ DAG.getExtLoad(ISD::SEXTLOAD, DL, MVT::i64, Ld->getChain(),
+ Ld->getBasePtr(), MVT::i32, Ld->getMemOperand());
+ if (N->getValueType(0) == MVT::v2i16) {
+ Results.push_back(DAG.getBitcast(MVT::v4i16, ExtLoad));
+ Results.push_back(ExtLoad.getValue(1));
+ } else if (N->getValueType(0) == MVT::v4i8) {
+ Results.push_back(DAG.getBitcast(MVT::v8i8, ExtLoad));
+ Results.push_back(ExtLoad.getValue(1));
+ }
+ return;
+ }
+
assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
"Unexpected custom legalisation");
@@ -14997,6 +15127,21 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NewRes));
break;
}
+ case RISCVISD::PASUB:
+ case RISCVISD::PASUBU: {
+ MVT VT = N->getSimpleValueType(0);
+ SDValue Op0 = N->getOperand(0);
+ SDValue Op1 = N->getOperand(1);
+ assert(VT == MVT::v2i16 || VT == MVT::v4i8);
+ MVT NewVT = MVT::v4i16;
+ if (VT == MVT::v4i8)
+ NewVT = MVT::v8i8;
+ SDValue Undef = DAG.getUNDEF(VT);
+ Op0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, NewVT, {Op0, Undef});
+ Op1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, NewVT, {Op1, Undef});
+ Results.push_back(DAG.getNode(N->getOpcode(), DL, NewVT, {Op0, Op1}));
+ return;
+ }
case ISD::EXTRACT_VECTOR_ELT: {
// Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
// type is illegal (currently only vXi64 RV32).
@@ -16104,11 +16249,84 @@ static SDValue combineTruncSelectToSMaxUSat(SDNode *N, SelectionDAG &DAG) {
return DAG.getNode(ISD::TRUNCATE, DL, VT, Min);
}
+// Handle P extension averaging subtraction pattern:
+// (vXiY (trunc (srl (sub ([s|z]ext vXiY:$a), ([s|z]ext vXiY:$b)), 1)))
+// -> PASUB/PASUBU
+static SDValue combinePExtTruncate(SDNode *N, SelectionDAG &DAG,
+ const RISCVSubtarget &Subtarget) {
+ SDValue N0 = N->getOperand(0);
+ EVT VT = N->getValueType(0);
+ if (N0.getOpcode() != ISD::SRL)
+ return SDValue();
+
+ MVT VecVT = VT.getSimpleVT();
+ if (VecVT != MVT::v4i16 && VecVT != MVT::v2i16 && VecVT != MVT::v8i8 &&
+ VecVT != MVT::v4i8 && VecVT != MVT::v2i32)
+ return SDValue();
+
+ // Check if shift amount is 1
+ SDValue ShAmt = N0.getOperand(1);
+ if (ShAmt.getOpcode() != ISD::BUILD_VECTOR)
+ return SDValue();
+
+ BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(ShAmt.getNode());
+ if (!BV)
+ return SDValue();
+ SDValue Splat = BV->getSplatValue();
+ if (!Splat)
+ return SDValue();
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(Splat);
+ if (!C)
+ return SDValue();
+ if (C->getZExtValue() != 1)
+ return SDValue();
+
+ // Check for SUB operation
+ SDValue Sub = N0.getOperand(0);
+ if (Sub.getOpcode() != ISD::SUB)
+ return SDValue();
+
+ SDValue LHS = Sub.getOperand(0);
+ SDValue RHS = Sub.getOperand(1);
+
+ // Check if both operands are sign/zero extends from the target
+ // type
+ bool IsSignExt = LHS.getOpcode() == ISD::SIGN_EXTEND &&
+ RHS.getOpcode() == ISD::SIGN_EXTEND;
+ bool IsZeroExt = LHS.getOpcode() == ISD::ZERO_EXTEND &&
+ RHS.getOpcode() == ISD::ZERO_EXTEND;
+
+ if (!IsSignExt && !IsZeroExt)
+ return SDValue();
+
+ SDValue A = LHS.getOperand(0);
+ SDValue B = RHS.getOperand(0);
+
+ // Check if the extends are from our target vector type
+ if (A.getValueType() != VT || B.getValueType() != VT)
+ return SDValue();
+
+ // Determine the instruction based on type and signedness
+ unsigned Opc;
+ if (IsSignExt)
+ Opc = RISCVISD::PASUB;
+ else if (IsZeroExt)
+ Opc = RISCVISD::PASUBU;
+ else
+ return SDValue();
+
+  // Create the PASUB/PASUBU ISD node; it is selected to a machine
+  // instruction later.
+ return DAG.getNode(Opc, SDLoc(N), VT, {A, B});
+}
+
static SDValue performTRUNCATECombine(SDNode *N, SelectionDAG &DAG,
const RISCVSubtarget &Subtarget) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
+ if (VT.isFixedLengthVector() && Subtarget.enablePExtCodeGen())
+ return combinePExtTruncate(N, DAG, Subtarget);
+
// Pre-promote (i1 (truncate (srl X, Y))) on RV64 with Zbs without zero
// extending X. This is safe since we only need the LSB after the shift and
// shift amounts larger than 31 would produce poison. If we wait until
@@ -22203,8 +22421,7 @@ static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
MachineFunction &MF = *BB->getParent();
DebugLoc DL = MI.getDebugLoc();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
- const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
+ const RISCVInstrInfo &TII = *MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
Register LoReg = MI.getOperand(0).getReg();
Register HiReg = MI.getOperand(1).getReg();
Register SrcReg = MI.getOperand(2).getReg();
@@ -22213,7 +22430,7 @@ static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
- RI, Register());
+ Register());
MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
MachineMemOperand *MMOLo =
MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
@@ -22239,8 +22456,7 @@ static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
MachineFunction &MF = *BB->getParent();
DebugLoc DL = MI.getDebugLoc();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
- const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
+ const RISCVInstrInfo &TII = *MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
Register DstReg = MI.getOperand(0).getReg();
Register LoReg = MI.getOperand(1).getReg();
Register HiReg = MI.getOperand(2).getReg();
@@ -22263,7 +22479,7 @@ static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
.addFrameIndex(FI)
.addImm(4)
.addMemOperand(MMOHi);
- TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI, Register());
+ TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, Register());
MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index dd62a9c..5cc427c 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -71,6 +71,9 @@ public:
bool preferScalarizeSplat(SDNode *N) const override;
+ /// Customize the preferred legalization strategy for certain types.
+ LegalizeTypeAction getPreferredVectorAction(MVT VT) const override;
+
bool softPromoteHalfType() const override { return true; }
/// Return the register type for a given MVT, ensuring vectors are treated
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index ce8dd3b..9d54212 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -639,7 +639,6 @@ void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool IsKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
MachineFunction *MF = MBB.getParent();
@@ -647,8 +646,8 @@ void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
unsigned Opcode;
if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
- Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
- RISCV::SW : RISCV::SD;
+ Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
+ : RISCV::SD;
} else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
Opcode = RISCV::SH_INX;
} else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
@@ -705,7 +704,7 @@ void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
.addFrameIndex(FI)
.addMemOperand(MMO)
.setMIFlag(Flags);
- NumVRegSpilled += TRI->getRegSizeInBits(*RC) / RISCV::RVVBitsPerBlock;
+ NumVRegSpilled += RegInfo.getRegSizeInBits(*RC) / RISCV::RVVBitsPerBlock;
} else {
MachineMemOperand *MMO = MF->getMachineMemOperand(
MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
@@ -720,10 +719,12 @@ void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
}
}
-void RISCVInstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DstReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DstReg, int FI,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
MachineFunction *MF = MBB.getParent();
MachineFrameInfo &MFI = MF->getFrameInfo();
DebugLoc DL =
@@ -731,8 +732,8 @@ void RISCVInstrInfo::loadRegFromStackSlot(
unsigned Opcode;
if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
- Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
- RISCV::LW : RISCV::LD;
+ Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
+ : RISCV::LD;
} else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
Opcode = RISCV::LH_INX;
} else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
@@ -788,7 +789,7 @@ void RISCVInstrInfo::loadRegFromStackSlot(
.addFrameIndex(FI)
.addMemOperand(MMO)
.setMIFlag(Flags);
- NumVRegReloaded += TRI->getRegSizeInBits(*RC) / RISCV::RVVBitsPerBlock;
+ NumVRegReloaded += RegInfo.getRegSizeInBits(*RC) / RISCV::RVVBitsPerBlock;
} else {
MachineMemOperand *MMO = MF->getMachineMemOperand(
MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
@@ -1379,14 +1380,14 @@ void RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
report_fatal_error("underestimated function size");
storeRegToStackSlot(MBB, MI, TmpGPR, /*IsKill=*/true, FrameIndex,
- &RISCV::GPRRegClass, TRI, Register());
+ &RISCV::GPRRegClass, Register());
TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
/*SpAdj=*/0, /*FIOperandNum=*/1);
MI.getOperand(1).setMBB(&RestoreBB);
loadRegFromStackSlot(RestoreBB, RestoreBB.end(), TmpGPR, FrameIndex,
- &RISCV::GPRRegClass, TRI, Register());
+ &RISCV::GPRRegClass, Register());
TRI->eliminateFrameIndex(RestoreBB.back(),
/*SpAdj=*/0, /*FIOperandNum=*/1);
}
@@ -2914,6 +2915,9 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
case RISCVOp::OPERAND_UIMM9_LSB000:
Ok = isShiftedUInt<6, 3>(Imm);
break;
+ case RISCVOp::OPERAND_SIMM8_UNSIGNED:
+ Ok = isInt<8>(Imm);
+ break;
case RISCVOp::OPERAND_SIMM10_LSB0000_NONZERO:
Ok = isShiftedInt<6, 4>(Imm) && (Imm != 0);
break;
@@ -2935,6 +2939,7 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
// clang-format off
CASE_OPERAND_SIMM(5)
CASE_OPERAND_SIMM(6)
+ CASE_OPERAND_SIMM(10)
CASE_OPERAND_SIMM(11)
CASE_OPERAND_SIMM(12)
CASE_OPERAND_SIMM(26)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index 800af26..0ffe015 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -116,13 +116,13 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
bool IsKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg,
- int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
using TargetInstrInfo::foldMemoryOperandImpl;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
index 4cbbba3..7637047 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td
@@ -18,7 +18,7 @@
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//
-def simm10 : RISCVSImmOp<10>;
+def simm10 : RISCVSImmOp<10>, TImmLeaf<XLenVT, "return isInt<10>(Imm);">;
def SImm8UnsignedAsmOperand : SImmAsmOperand<8, "Unsigned"> {
let RenderMethod = "addSImm8UnsignedOperands";
@@ -26,7 +26,7 @@ def SImm8UnsignedAsmOperand : SImmAsmOperand<8, "Unsigned"> {
// A 8-bit signed immediate allowing range [-128, 255]
// but represented as [-128, 127].
-def simm8_unsigned : RISCVOp {
+def simm8_unsigned : RISCVOp, TImmLeaf<XLenVT, "return isInt<8>(Imm);"> {
let ParserMatchClass = SImm8UnsignedAsmOperand;
let EncoderMethod = "getImmOpValue";
let DecoderMethod = "decodeSImmOperand<8>";
@@ -1463,8 +1463,91 @@ let Predicates = [HasStdExtP, IsRV32] in {
def riscv_absw : RVSDNode<"ABSW", SDTIntUnaryOp>;
-let Predicates = [HasStdExtP] in
-def : PatGpr<abs, ABS>;
+def SDT_RISCVPLI : SDTypeProfile<1, 1, [SDTCisVec<0>,
+ SDTCisInt<0>,
+ SDTCisInt<1>]>;
+def riscv_pli : RVSDNode<"PLI", SDT_RISCVPLI>;
+def SDT_RISCVPASUB : SDTypeProfile<1, 2, [SDTCisVec<0>,
+ SDTCisInt<0>,
+ SDTCisSameAs<0, 1>,
+ SDTCisSameAs<0, 2>]>;
+def riscv_pasub : RVSDNode<"PASUB", SDT_RISCVPASUB>;
+def riscv_pasubu : RVSDNode<"PASUBU", SDT_RISCVPASUB>;
-let Predicates = [HasStdExtP, IsRV64] in
-def : PatGpr<riscv_absw, ABSW>;
+let Predicates = [HasStdExtP] in {
+ def : PatGpr<abs, ABS>;
+
+ // Basic 8-bit arithmetic patterns
+ def: Pat<(XLenVecI8VT (add GPR:$rs1, GPR:$rs2)), (PADD_B GPR:$rs1, GPR:$rs2)>;
+ def: Pat<(XLenVecI8VT (sub GPR:$rs1, GPR:$rs2)), (PSUB_B GPR:$rs1, GPR:$rs2)>;
+
+ // Basic 16-bit arithmetic patterns
+ def: Pat<(XLenVecI16VT (add GPR:$rs1, GPR:$rs2)), (PADD_H GPR:$rs1, GPR:$rs2)>;
+ def: Pat<(XLenVecI16VT (sub GPR:$rs1, GPR:$rs2)), (PSUB_H GPR:$rs1, GPR:$rs2)>;
+
+ // 8-bit saturating add/sub patterns
+ def: Pat<(XLenVecI8VT (saddsat GPR:$rs1, GPR:$rs2)), (PSADD_B GPR:$rs1, GPR:$rs2)>;
+ def: Pat<(XLenVecI8VT (uaddsat GPR:$rs1, GPR:$rs2)), (PSADDU_B GPR:$rs1, GPR:$rs2)>;
+ def: Pat<(XLenVecI8VT (ssubsat GPR:$rs1, GPR:$rs2)), (PSSUB_B GPR:$rs1, GPR:$rs2)>;
+ def: Pat<(XLenVecI8VT (usubsat GPR:$rs1, GPR:$rs2)), (PSSUBU_B GPR:$rs1, GPR:$rs2)>;
+
+ // 16-bit saturating add/sub patterns
+ def: Pat<(XLenVecI16VT (saddsat GPR:$rs1, GPR:$rs2)), (PSADD_H GPR:$rs1, GPR:$rs2)>;
+ def: Pat<(XLenVecI16VT (uaddsat GPR:$rs1, GPR:$rs2)), (PSADDU_H GPR:$rs1, GPR:$rs2)>;
+ def: Pat<(XLenVecI16VT (ssubsat GPR:$rs1, GPR:$rs2)), (PSSUB_H GPR:$rs1, GPR:$rs2)>;
+ def: Pat<(XLenVecI16VT (usubsat GPR:$rs1, GPR:$rs2)), (PSSUBU_H GPR:$rs1, GPR:$rs2)>;
+
+ // 8-bit averaging patterns
+ def: Pat<(XLenVecI8VT (avgfloors GPR:$rs1, GPR:$rs2)), (PAADD_B GPR:$rs1, GPR:$rs2)>;
+ def: Pat<(XLenVecI8VT (avgflooru GPR:$rs1, GPR:$rs2)), (PAADDU_B GPR:$rs1, GPR:$rs2)>;
+ def: Pat<(XLenVecI8VT (riscv_pasub GPR:$rs1, GPR:$rs2)), (PASUB_B GPR:$rs1, GPR:$rs2)>;
+ def: Pat<(XLenVecI8VT (riscv_pasubu GPR:$rs1, GPR:$rs2)), (PASUBU_B GPR:$rs1, GPR:$rs2)>;
+
+ // 16-bit averaging patterns
+ def: Pat<(XLenVecI16VT (avgfloors GPR:$rs1, GPR:$rs2)), (PAADD_H GPR:$rs1, GPR:$rs2)>;
+ def: Pat<(XLenVecI16VT (avgflooru GPR:$rs1, GPR:$rs2)), (PAADDU_H GPR:$rs1, GPR:$rs2)>;
+ def: Pat<(XLenVecI16VT (riscv_pasub GPR:$rs1, GPR:$rs2)), (PASUB_H GPR:$rs1, GPR:$rs2)>;
+ def: Pat<(XLenVecI16VT (riscv_pasubu GPR:$rs1, GPR:$rs2)), (PASUBU_H GPR:$rs1, GPR:$rs2)>;
+
+ // 8-bit absolute difference patterns
+ def: Pat<(XLenVecI8VT (abds GPR:$rs1, GPR:$rs2)), (PDIF_B GPR:$rs1, GPR:$rs2)>;
+ def: Pat<(XLenVecI8VT (abdu GPR:$rs1, GPR:$rs2)), (PDIFU_B GPR:$rs1, GPR:$rs2)>;
+
+ // 16-bit absolute difference patterns
+ def: Pat<(XLenVecI16VT (abds GPR:$rs1, GPR:$rs2)), (PDIF_H GPR:$rs1, GPR:$rs2)>;
+ def: Pat<(XLenVecI16VT (abdu GPR:$rs1, GPR:$rs2)), (PDIFU_H GPR:$rs1, GPR:$rs2)>;
+
+
+ // 8-bit PLI SD node pattern
+ def: Pat<(XLenVecI8VT (riscv_pli simm8_unsigned:$imm8)), (PLI_B simm8_unsigned:$imm8)>;
+ // 16-bit PLI SD node pattern
+ def: Pat<(XLenVecI16VT (riscv_pli simm10:$imm10)), (PLI_H simm10:$imm10)>;
+
+} // Predicates = [HasStdExtP]
+
+let Predicates = [HasStdExtP, IsRV32] in {
+ // Load/Store patterns
+ def : StPat<store, SW, GPR, v4i8>;
+ def : StPat<store, SW, GPR, v2i16>;
+ def : LdPat<load, LW, v4i8>;
+ def : LdPat<load, LW, v2i16>;
+} // Predicates = [HasStdExtP, IsRV32]
+
+let Predicates = [HasStdExtP, IsRV64] in {
+ def : PatGpr<riscv_absw, ABSW>;
+
+ // 32-bit PLI SD node pattern
+ def: Pat<(v2i32 (riscv_pli simm10:$imm10)), (PLI_W simm10:$imm10)>;
+
+ // 32-bit averaging-sub patterns
+ def: Pat<(v2i32 (riscv_pasub GPR:$rs1, GPR:$rs2)), (PASUB_W GPR:$rs1, GPR:$rs2)>;
+ def: Pat<(v2i32 (riscv_pasubu GPR:$rs1, GPR:$rs2)), (PASUBU_W GPR:$rs1, GPR:$rs2)>;
+
+ // Load/Store patterns
+ def : StPat<store, SD, GPR, v8i8>;
+ def : StPat<store, SD, GPR, v4i16>;
+ def : StPat<store, SD, GPR, v2i32>;
+ def : LdPat<load, LD, v8i8>;
+ def : LdPat<load, LD, v4i16>;
+ def : LdPat<load, LD, v2i32>;
+} // Predicates = [HasStdExtP, IsRV64]
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 6605a5c..87095e7 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -222,6 +222,12 @@ def XLenFVT : ValueTypeByHwMode<[RV64],
[f64]>;
def XLenPairFVT : ValueTypeByHwMode<[RV32],
[f64]>;
+
+// P extension
+def XLenVecI8VT : ValueTypeByHwMode<[RV32, RV64],
+ [v4i8, v8i8]>;
+def XLenVecI16VT : ValueTypeByHwMode<[RV32, RV64],
+ [v2i16, v4i16]>;
def XLenRI : RegInfoByHwMode<
[RV32, RV64],
[RegInfo<32,32,32>, RegInfo<64,64,64>]>;
@@ -238,7 +244,9 @@ class RISCVRegisterClass<list<ValueType> regTypes, int align, dag regList>
}
class GPRRegisterClass<dag regList>
- : RISCVRegisterClass<[XLenVT, XLenFVT], 32, regList> {
+ : RISCVRegisterClass<[XLenVT, XLenFVT,
+ // P extension packed vector types:
+ XLenVecI8VT, XLenVecI16VT, v2i32], 32, regList> {
let RegInfos = XLenRI;
}
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
index 3b43be3..926cc9ea 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
@@ -69,6 +69,12 @@ static cl::opt<bool> UseMIPSCCMovInsn("use-riscv-mips-ccmov",
cl::desc("Use 'mips.ccmov' instruction"),
cl::init(true), cl::Hidden);
+static cl::opt<bool> EnablePExtCodeGen(
+ "enable-p-ext-codegen",
+ cl::desc("Turn on P Extension codegen(This is a temporary switch where "
+ "only partial codegen is currently supported)"),
+ cl::init(false), cl::Hidden);
+
void RISCVSubtarget::anchor() {}
RISCVSubtarget &
@@ -145,6 +151,10 @@ bool RISCVSubtarget::useConstantPoolForLargeInts() const {
return !RISCVDisableUsingConstantPoolForLargeInts;
}
+bool RISCVSubtarget::enablePExtCodeGen() const {
+ return HasStdExtP && EnablePExtCodeGen;
+}
+
unsigned RISCVSubtarget::getMaxBuildIntsCost() const {
// Loading integer from constant pool needs two instructions (the reason why
// the minimum cost is 2): an address calculation instruction and a load
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index d5ffa6c..f05115d 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -146,6 +146,7 @@ public:
}
bool enableMachineScheduler() const override { return true; }
+ bool enableTerminalRule() const override { return true; }
bool enablePostRAScheduler() const override { return UsePostRAScheduler; }
@@ -321,6 +322,8 @@ public:
}
}
+ bool enablePExtCodeGen() const;
+
// Returns VLEN divided by DLEN. Where DLEN is the datapath width of the
// vector hardware implementation which may be less than VLEN.
unsigned getDLenFactor() const {
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 3d8eb40..dca6e9c 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -969,6 +969,13 @@ InstructionCost RISCVTTIImpl::getScalarizationOverhead(
if (isa<ScalableVectorType>(Ty))
return InstructionCost::getInvalid();
+ // TODO: Add proper cost model for P extension fixed vectors (e.g., v4i16)
+ // For now, skip all fixed vector cost analysis when P extension is available
+ // to avoid crashes in getMinRVVVectorSizeInBits()
+ if (ST->enablePExtCodeGen() && isa<FixedVectorType>(Ty)) {
+ return 1; // Treat as single instruction cost for now
+ }
+
// A build_vector (which is m1 sized or smaller) can be done in no
// worse than one vslide1down.vx per element in the type. We could
// in theory do an explode_vector in the inverse manner, but our
@@ -1625,6 +1632,14 @@ InstructionCost RISCVTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
if (!IsVectorType)
return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
+ // TODO: Add proper cost model for P extension fixed vectors (e.g., v4i16)
+ // For now, skip all fixed vector cost analysis when P extension is available
+ // to avoid crashes in getMinRVVVectorSizeInBits()
+ if (ST->enablePExtCodeGen() &&
+ (isa<FixedVectorType>(Dst) || isa<FixedVectorType>(Src))) {
+ return 1; // Treat as single instruction cost for now
+ }
+
// FIXME: Need to compute legalizing cost for illegal types. The current
// code handles only legal types and those which can be trivially
// promoted to legal.
@@ -2323,6 +2338,13 @@ InstructionCost RISCVTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
const Value *Op1) const {
assert(Val->isVectorTy() && "This must be a vector type");
+ // TODO: Add proper cost model for P extension fixed vectors (e.g., v4i16)
+ // For now, skip all fixed vector cost analysis when P extension is available
+ // to avoid crashes in getMinRVVVectorSizeInBits()
+ if (ST->enablePExtCodeGen() && isa<FixedVectorType>(Val)) {
+ return 1; // Treat as single instruction cost for now
+ }
+
if (Opcode != Instruction::ExtractElement &&
Opcode != Instruction::InsertElement)
return BaseT::getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1);
diff --git a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
index fdf9a4f..e1ff243 100644
--- a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
@@ -455,7 +455,7 @@ bool RISCVVectorPeephole::convertSameMaskVMergeToVMv(MachineInstr &MI) {
True->getOperand(1).setReg(MI.getOperand(2).getReg());
// If True is masked then its passthru needs to be in VRNoV0.
MRI->constrainRegClass(True->getOperand(1).getReg(),
- TII->getRegClass(True->getDesc(), 1, TRI));
+ TII->getRegClass(True->getDesc(), 1));
}
MI.setDesc(TII->get(NewOpc));
@@ -675,7 +675,7 @@ bool RISCVVectorPeephole::foldVMV_V_V(MachineInstr &MI) {
if (Passthru.getReg().isValid())
MRI->constrainRegClass(
Passthru.getReg(),
- TII->getRegClass(Src->getDesc(), SrcPassthru.getOperandNo(), TRI));
+ TII->getRegClass(Src->getDesc(), SrcPassthru.getOperandNo()));
}
if (RISCVII::hasVecPolicyOp(Src->getDesc().TSFlags)) {
diff --git a/llvm/lib/Target/SPIRV/SPIRVIRMapping.h b/llvm/lib/Target/SPIRV/SPIRVIRMapping.h
index a329fd5e..c99d603 100644
--- a/llvm/lib/Target/SPIRV/SPIRVIRMapping.h
+++ b/llvm/lib/Target/SPIRV/SPIRVIRMapping.h
@@ -22,8 +22,6 @@
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
-#include <type_traits>
-
namespace llvm {
namespace SPIRV {
diff --git a/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp b/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
index 7c1db3c..ef45d31 100644
--- a/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
+++ b/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
@@ -235,7 +235,7 @@ private:
};
struct RegOp {
- unsigned RegNum;
+ MCRegister Reg;
RegisterKind Kind;
};
@@ -244,8 +244,8 @@ private:
};
struct MemOp {
- unsigned Base;
- unsigned OffsetReg;
+ MCRegister Base;
+ MCRegister OffsetReg;
const MCExpr *Off;
};
@@ -326,7 +326,7 @@ public:
MCRegister getReg() const override {
assert((Kind == k_Register) && "Invalid access!");
- return Reg.RegNum;
+ return Reg.Reg;
}
const MCExpr *getImm() const {
@@ -334,12 +334,12 @@ public:
return Imm.Val;
}
- unsigned getMemBase() const {
+ MCRegister getMemBase() const {
assert((Kind == k_MemoryReg || Kind == k_MemoryImm) && "Invalid access!");
return Mem.Base;
}
- unsigned getMemOffsetReg() const {
+ MCRegister getMemOffsetReg() const {
assert((Kind == k_MemoryReg) && "Invalid access!");
return Mem.OffsetReg;
}
@@ -376,12 +376,16 @@ public:
void print(raw_ostream &OS, const MCAsmInfo &MAI) const override {
switch (Kind) {
case k_Token: OS << "Token: " << getToken() << "\n"; break;
- case k_Register: OS << "Reg: #" << getReg() << "\n"; break;
+ case k_Register:
+ OS << "Reg: #" << getReg().id() << "\n";
+ break;
case k_Immediate: OS << "Imm: " << getImm() << "\n"; break;
- case k_MemoryReg: OS << "Mem: " << getMemBase() << "+"
- << getMemOffsetReg() << "\n"; break;
+ case k_MemoryReg:
+ OS << "Mem: " << getMemBase().id() << "+" << getMemOffsetReg().id()
+ << "\n";
+ break;
case k_MemoryImm: assert(getMemOff() != nullptr);
- OS << "Mem: " << getMemBase() << "+";
+ OS << "Mem: " << getMemBase().id() << "+";
MAI.printExpr(OS, *getMemOff());
OS << "\n";
break;
@@ -432,7 +436,7 @@ public:
Inst.addOperand(MCOperand::createReg(getMemBase()));
- assert(getMemOffsetReg() != 0 && "Invalid offset");
+ assert(getMemOffsetReg().isValid() && "Invalid offset");
Inst.addOperand(MCOperand::createReg(getMemOffsetReg()));
}
@@ -480,10 +484,10 @@ public:
return Op;
}
- static std::unique_ptr<SparcOperand> CreateReg(unsigned RegNum, unsigned Kind,
+ static std::unique_ptr<SparcOperand> CreateReg(MCRegister Reg, unsigned Kind,
SMLoc S, SMLoc E) {
auto Op = std::make_unique<SparcOperand>(k_Register);
- Op->Reg.RegNum = RegNum;
+ Op->Reg.Reg = Reg;
Op->Reg.Kind = (SparcOperand::RegisterKind)Kind;
Op->StartLoc = S;
Op->EndLoc = E;
@@ -540,7 +544,7 @@ public:
regIdx = Reg - Sparc::I0 + 24;
if (regIdx % 2 || regIdx > 31)
return false;
- Op.Reg.RegNum = IntPairRegs[regIdx / 2];
+ Op.Reg.Reg = IntPairRegs[regIdx / 2];
Op.Reg.Kind = rk_IntPairReg;
return true;
}
@@ -551,7 +555,7 @@ public:
unsigned regIdx = Reg - Sparc::F0;
if (regIdx % 2 || regIdx > 31)
return false;
- Op.Reg.RegNum = DoubleRegs[regIdx / 2];
+ Op.Reg.Reg = DoubleRegs[regIdx / 2];
Op.Reg.Kind = rk_DoubleReg;
return true;
}
@@ -574,7 +578,7 @@ public:
Reg = QuadFPRegs[regIdx / 2];
break;
}
- Op.Reg.RegNum = Reg;
+ Op.Reg.Reg = Reg;
Op.Reg.Kind = rk_QuadReg;
return true;
}
@@ -587,13 +591,13 @@ public:
regIdx = Reg - Sparc::C0;
if (regIdx % 2 || regIdx > 31)
return false;
- Op.Reg.RegNum = CoprocPairRegs[regIdx / 2];
+ Op.Reg.Reg = CoprocPairRegs[regIdx / 2];
Op.Reg.Kind = rk_CoprocPairReg;
return true;
}
static std::unique_ptr<SparcOperand>
- MorphToMEMrr(unsigned Base, std::unique_ptr<SparcOperand> Op) {
+ MorphToMEMrr(MCRegister Base, std::unique_ptr<SparcOperand> Op) {
MCRegister offsetReg = Op->getReg();
Op->Kind = k_MemoryReg;
Op->Mem.Base = Base;
@@ -602,8 +606,8 @@ public:
return Op;
}
- static std::unique_ptr<SparcOperand>
- CreateMEMr(unsigned Base, SMLoc S, SMLoc E) {
+ static std::unique_ptr<SparcOperand> CreateMEMr(MCRegister Base, SMLoc S,
+ SMLoc E) {
auto Op = std::make_unique<SparcOperand>(k_MemoryReg);
Op->Mem.Base = Base;
Op->Mem.OffsetReg = Sparc::G0; // always 0
@@ -614,11 +618,11 @@ public:
}
static std::unique_ptr<SparcOperand>
- MorphToMEMri(unsigned Base, std::unique_ptr<SparcOperand> Op) {
+ MorphToMEMri(MCRegister Base, std::unique_ptr<SparcOperand> Op) {
const MCExpr *Imm = Op->getImm();
Op->Kind = k_MemoryImm;
Op->Mem.Base = Base;
- Op->Mem.OffsetReg = 0;
+ Op->Mem.OffsetReg = MCRegister();
Op->Mem.Off = Imm;
return Op;
}
diff --git a/llvm/lib/Target/Sparc/SparcInstrInfo.cpp b/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
index fcd6cd7..6596379 100644
--- a/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
+++ b/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -527,7 +527,6 @@ void SparcInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
DebugLoc DL;
@@ -564,10 +563,12 @@ void SparcInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
llvm_unreachable("Can't store this register to stack slot");
}
-void SparcInstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void SparcInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DestReg, int FI,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
diff --git a/llvm/lib/Target/Sparc/SparcInstrInfo.h b/llvm/lib/Target/Sparc/SparcInstrInfo.h
index 01d0204..273888f 100644
--- a/llvm/lib/Target/Sparc/SparcInstrInfo.h
+++ b/llvm/lib/Target/Sparc/SparcInstrInfo.h
@@ -92,14 +92,13 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
Register getGlobalBaseReg(MachineFunction *MF) const;
diff --git a/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZInstPrinterCommon.cpp b/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZInstPrinterCommon.cpp
index 275165d..a24543b 100644
--- a/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZInstPrinterCommon.cpp
+++ b/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZInstPrinterCommon.cpp
@@ -218,7 +218,7 @@ void SystemZInstPrinterCommon::printBDXAddrOperand(const MCInst *MI, int OpNum,
void SystemZInstPrinterCommon::printBDLAddrOperand(const MCInst *MI, int OpNum,
raw_ostream &O) {
- unsigned Base = MI->getOperand(OpNum).getReg();
+ MCRegister Base = MI->getOperand(OpNum).getReg();
const MCOperand &DispMO = MI->getOperand(OpNum + 1);
uint64_t Length = MI->getOperand(OpNum + 2).getImm();
printOperand(DispMO, &MAI, O);
@@ -232,9 +232,9 @@ void SystemZInstPrinterCommon::printBDLAddrOperand(const MCInst *MI, int OpNum,
void SystemZInstPrinterCommon::printBDRAddrOperand(const MCInst *MI, int OpNum,
raw_ostream &O) {
- unsigned Base = MI->getOperand(OpNum).getReg();
+ MCRegister Base = MI->getOperand(OpNum).getReg();
const MCOperand &DispMO = MI->getOperand(OpNum + 1);
- unsigned Length = MI->getOperand(OpNum + 2).getReg();
+ MCRegister Length = MI->getOperand(OpNum + 2).getReg();
printOperand(DispMO, &MAI, O);
O << "(";
printRegName(O, Length);
diff --git a/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp b/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
index dcefff9..570bbd8 100644
--- a/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
@@ -360,12 +360,12 @@ bool SystemZELFFrameLowering::spillCalleeSavedRegisters(
if (SystemZ::FP64BitRegClass.contains(Reg)) {
MBB.addLiveIn(Reg);
TII->storeRegToStackSlot(MBB, MBBI, Reg, true, I.getFrameIdx(),
- &SystemZ::FP64BitRegClass, TRI, Register());
+ &SystemZ::FP64BitRegClass, Register());
}
if (SystemZ::VR128BitRegClass.contains(Reg)) {
MBB.addLiveIn(Reg);
TII->storeRegToStackSlot(MBB, MBBI, Reg, true, I.getFrameIdx(),
- &SystemZ::VR128BitRegClass, TRI, Register());
+ &SystemZ::VR128BitRegClass, Register());
}
}
@@ -389,10 +389,10 @@ bool SystemZELFFrameLowering::restoreCalleeSavedRegisters(
MCRegister Reg = I.getReg();
if (SystemZ::FP64BitRegClass.contains(Reg))
TII->loadRegFromStackSlot(MBB, MBBI, Reg, I.getFrameIdx(),
- &SystemZ::FP64BitRegClass, TRI, Register());
+ &SystemZ::FP64BitRegClass, Register());
if (SystemZ::VR128BitRegClass.contains(Reg))
TII->loadRegFromStackSlot(MBB, MBBI, Reg, I.getFrameIdx(),
- &SystemZ::VR128BitRegClass, TRI, Register());
+ &SystemZ::VR128BitRegClass, Register());
}
// Restore call-saved GPRs (but not call-clobbered varargs, which at
@@ -1157,12 +1157,12 @@ bool SystemZXPLINKFrameLowering::spillCalleeSavedRegisters(
if (SystemZ::FP64BitRegClass.contains(Reg)) {
MBB.addLiveIn(Reg);
TII->storeRegToStackSlot(MBB, MBBI, Reg, true, I.getFrameIdx(),
- &SystemZ::FP64BitRegClass, TRI, Register());
+ &SystemZ::FP64BitRegClass, Register());
}
if (SystemZ::VR128BitRegClass.contains(Reg)) {
MBB.addLiveIn(Reg);
TII->storeRegToStackSlot(MBB, MBBI, Reg, true, I.getFrameIdx(),
- &SystemZ::VR128BitRegClass, TRI, Register());
+ &SystemZ::VR128BitRegClass, Register());
}
}
@@ -1189,10 +1189,10 @@ bool SystemZXPLINKFrameLowering::restoreCalleeSavedRegisters(
MCRegister Reg = I.getReg();
if (SystemZ::FP64BitRegClass.contains(Reg))
TII->loadRegFromStackSlot(MBB, MBBI, Reg, I.getFrameIdx(),
- &SystemZ::FP64BitRegClass, TRI, Register());
+ &SystemZ::FP64BitRegClass, Register());
if (SystemZ::VR128BitRegClass.contains(Reg))
TII->loadRegFromStackSlot(MBB, MBBI, Reg, I.getFrameIdx(),
- &SystemZ::VR128BitRegClass, TRI, Register());
+ &SystemZ::VR128BitRegClass, Register());
}
// Restore call-saved GPRs (but not call-clobbered varargs, which at
diff --git a/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp b/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp
index 5313fba..8fc339f 100644
--- a/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZHazardRecognizer.cpp
@@ -115,11 +115,10 @@ SystemZHazardRecognizer::fitsIntoCurrentGroup(SUnit *SU) const {
}
bool SystemZHazardRecognizer::has4RegOps(const MachineInstr *MI) const {
- const TargetRegisterInfo *TRI = &TII->getRegisterInfo();
const MCInstrDesc &MID = MI->getDesc();
unsigned Count = 0;
for (unsigned OpIdx = 0; OpIdx < MID.getNumOperands(); OpIdx++) {
- const TargetRegisterClass *RC = TII->getRegClass(MID, OpIdx, TRI);
+ const TargetRegisterClass *RC = TII->getRegClass(MID, OpIdx);
if (RC == nullptr)
continue;
if (OpIdx >= MID.getNumDefs() &&
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 23e7e7e..eb1ce4a 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -1023,8 +1023,8 @@ void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
void SystemZInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
bool isKill, int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
- MachineInstr::MIFlag Flags) const {
+
+ Register VReg, MachineInstr::MIFlag Flags) const {
DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
// Callers may expect a single instruction, so keep 128-bit moves
@@ -1036,10 +1036,12 @@ void SystemZInstrInfo::storeRegToStackSlot(
FrameIdx);
}
-void SystemZInstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg,
- int FrameIdx, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void SystemZInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ Register DestReg, int FrameIdx,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
// Callers may expect a single instruction, so keep 128-bit moves
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
index 7b9ad7b..4aecdd7 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -281,12 +281,14 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
MachineInstr *convertToThreeAddress(MachineInstr &MI, LiveVariables *LV,
LiveIntervals *LIS) const override;
diff --git a/llvm/lib/Target/VE/AsmParser/VEAsmParser.cpp b/llvm/lib/Target/VE/AsmParser/VEAsmParser.cpp
index 20f561a..9b47d23 100644
--- a/llvm/lib/Target/VE/AsmParser/VEAsmParser.cpp
+++ b/llvm/lib/Target/VE/AsmParser/VEAsmParser.cpp
@@ -54,7 +54,7 @@ class VEAsmParser : public MCTargetAsmParser {
uint64_t &ErrorInfo,
bool MatchingInlineAsm) override;
bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
- int parseRegisterName(MCRegister (*matchFn)(StringRef));
+ MCRegister parseRegisterName(MCRegister (*matchFn)(StringRef));
ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
SMLoc &EndLoc) override;
bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
@@ -169,7 +169,7 @@ private:
};
struct RegOp {
- unsigned RegNum;
+ MCRegister Reg;
};
struct ImmOp {
@@ -177,8 +177,8 @@ private:
};
struct MemOp {
- unsigned Base;
- unsigned IndexReg;
+ MCRegister Base;
+ MCRegister IndexReg;
const MCExpr *Index;
const MCExpr *Offset;
};
@@ -342,7 +342,7 @@ public:
MCRegister getReg() const override {
assert((Kind == k_Register) && "Invalid access!");
- return Reg.RegNum;
+ return Reg.Reg;
}
const MCExpr *getImm() const {
@@ -350,14 +350,14 @@ public:
return Imm.Val;
}
- unsigned getMemBase() const {
+ MCRegister getMemBase() const {
assert((Kind == k_MemoryRegRegImm || Kind == k_MemoryRegImmImm ||
Kind == k_MemoryRegImm) &&
"Invalid access!");
return Mem.Base;
}
- unsigned getMemIndexReg() const {
+ MCRegister getMemIndexReg() const {
assert((Kind == k_MemoryRegRegImm || Kind == k_MemoryZeroRegImm) &&
"Invalid access!");
return Mem.IndexReg;
@@ -415,20 +415,21 @@ public:
OS << "Token: " << getToken() << "\n";
break;
case k_Register:
- OS << "Reg: #" << getReg() << "\n";
+ OS << "Reg: #" << getReg().id() << "\n";
break;
case k_Immediate:
OS << "Imm: " << getImm() << "\n";
break;
case k_MemoryRegRegImm:
assert(getMemOffset() != nullptr);
- OS << "Mem: #" << getMemBase() << "+#" << getMemIndexReg() << "+";
+ OS << "Mem: #" << getMemBase().id() << "+#" << getMemIndexReg().id()
+ << "+";
MAI.printExpr(OS, *getMemOffset());
OS << "\n";
break;
case k_MemoryRegImmImm:
assert(getMemIndex() != nullptr && getMemOffset() != nullptr);
- OS << "Mem: #" << getMemBase() << "+";
+ OS << "Mem: #" << getMemBase().id() << "+";
MAI.printExpr(OS, *getMemIndex());
OS << "+";
MAI.printExpr(OS, *getMemOffset());
@@ -436,7 +437,7 @@ public:
break;
case k_MemoryZeroRegImm:
assert(getMemOffset() != nullptr);
- OS << "Mem: 0+#" << getMemIndexReg() << "+";
+ OS << "Mem: 0+#" << getMemIndexReg().id() << "+";
MAI.printExpr(OS, *getMemOffset());
OS << "\n";
break;
@@ -450,7 +451,7 @@ public:
break;
case k_MemoryRegImm:
assert(getMemOffset() != nullptr);
- OS << "Mem: #" << getMemBase() << "+";
+ OS << "Mem: #" << getMemBase().id() << "+";
MAI.printExpr(OS, *getMemOffset());
OS << "\n";
break;
@@ -606,10 +607,10 @@ public:
return Op;
}
- static std::unique_ptr<VEOperand> CreateReg(unsigned RegNum, SMLoc S,
+ static std::unique_ptr<VEOperand> CreateReg(MCRegister Reg, SMLoc S,
SMLoc E) {
auto Op = std::make_unique<VEOperand>(k_Register);
- Op->Reg.RegNum = RegNum;
+ Op->Reg.Reg = Reg;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
@@ -653,38 +654,38 @@ public:
}
static bool MorphToI32Reg(VEOperand &Op) {
- unsigned Reg = Op.getReg();
+ MCRegister Reg = Op.getReg();
unsigned regIdx = Reg - VE::SX0;
if (regIdx > 63)
return false;
- Op.Reg.RegNum = I32Regs[regIdx];
+ Op.Reg.Reg = I32Regs[regIdx];
return true;
}
static bool MorphToF32Reg(VEOperand &Op) {
- unsigned Reg = Op.getReg();
+ MCRegister Reg = Op.getReg();
unsigned regIdx = Reg - VE::SX0;
if (regIdx > 63)
return false;
- Op.Reg.RegNum = F32Regs[regIdx];
+ Op.Reg.Reg = F32Regs[regIdx];
return true;
}
static bool MorphToF128Reg(VEOperand &Op) {
- unsigned Reg = Op.getReg();
+ MCRegister Reg = Op.getReg();
unsigned regIdx = Reg - VE::SX0;
if (regIdx % 2 || regIdx > 63)
return false;
- Op.Reg.RegNum = F128Regs[regIdx / 2];
+ Op.Reg.Reg = F128Regs[regIdx / 2];
return true;
}
static bool MorphToVM512Reg(VEOperand &Op) {
- unsigned Reg = Op.getReg();
+ MCRegister Reg = Op.getReg();
unsigned regIdx = Reg - VE::VM0;
if (regIdx % 2 || regIdx > 15)
return false;
- Op.Reg.RegNum = VM512Regs[regIdx / 2];
+ Op.Reg.Reg = VM512Regs[regIdx / 2];
return true;
}
@@ -696,16 +697,16 @@ public:
if (regIdx > 31 || MISCRegs[regIdx] == VE::NoRegister)
return false;
Op.Kind = k_Register;
- Op.Reg.RegNum = MISCRegs[regIdx];
+ Op.Reg.Reg = MISCRegs[regIdx];
return true;
}
static std::unique_ptr<VEOperand>
- MorphToMEMri(unsigned Base, std::unique_ptr<VEOperand> Op) {
+ MorphToMEMri(MCRegister Base, std::unique_ptr<VEOperand> Op) {
const MCExpr *Imm = Op->getImm();
Op->Kind = k_MemoryRegImm;
Op->Mem.Base = Base;
- Op->Mem.IndexReg = 0;
+ Op->Mem.IndexReg = MCRegister();
Op->Mem.Index = nullptr;
Op->Mem.Offset = Imm;
return Op;
@@ -715,15 +716,16 @@ public:
MorphToMEMzi(std::unique_ptr<VEOperand> Op) {
const MCExpr *Imm = Op->getImm();
Op->Kind = k_MemoryZeroImm;
- Op->Mem.Base = 0;
- Op->Mem.IndexReg = 0;
+ Op->Mem.Base = MCRegister();
+ Op->Mem.IndexReg = MCRegister();
Op->Mem.Index = nullptr;
Op->Mem.Offset = Imm;
return Op;
}
static std::unique_ptr<VEOperand>
- MorphToMEMrri(unsigned Base, unsigned Index, std::unique_ptr<VEOperand> Op) {
+ MorphToMEMrri(MCRegister Base, MCRegister Index,
+ std::unique_ptr<VEOperand> Op) {
const MCExpr *Imm = Op->getImm();
Op->Kind = k_MemoryRegRegImm;
Op->Mem.Base = Base;
@@ -734,22 +736,22 @@ public:
}
static std::unique_ptr<VEOperand>
- MorphToMEMrii(unsigned Base, const MCExpr *Index,
+ MorphToMEMrii(MCRegister Base, const MCExpr *Index,
std::unique_ptr<VEOperand> Op) {
const MCExpr *Imm = Op->getImm();
Op->Kind = k_MemoryRegImmImm;
Op->Mem.Base = Base;
- Op->Mem.IndexReg = 0;
+ Op->Mem.IndexReg = MCRegister();
Op->Mem.Index = Index;
Op->Mem.Offset = Imm;
return Op;
}
static std::unique_ptr<VEOperand>
- MorphToMEMzri(unsigned Index, std::unique_ptr<VEOperand> Op) {
+ MorphToMEMzri(MCRegister Index, std::unique_ptr<VEOperand> Op) {
const MCExpr *Imm = Op->getImm();
Op->Kind = k_MemoryZeroRegImm;
- Op->Mem.Base = 0;
+ Op->Mem.Base = MCRegister();
Op->Mem.IndexReg = Index;
Op->Mem.Index = nullptr;
Op->Mem.Offset = Imm;
@@ -760,8 +762,8 @@ public:
MorphToMEMzii(const MCExpr *Index, std::unique_ptr<VEOperand> Op) {
const MCExpr *Imm = Op->getImm();
Op->Kind = k_MemoryZeroImmImm;
- Op->Mem.Base = 0;
- Op->Mem.IndexReg = 0;
+ Op->Mem.Base = MCRegister();
+ Op->Mem.IndexReg = MCRegister();
Op->Mem.Index = Index;
Op->Mem.Offset = Imm;
return Op;
@@ -815,14 +817,14 @@ bool VEAsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
/// Parses a register name using a given matching function.
/// Checks for lowercase or uppercase if necessary.
-int VEAsmParser::parseRegisterName(MCRegister (*matchFn)(StringRef)) {
+MCRegister VEAsmParser::parseRegisterName(MCRegister (*matchFn)(StringRef)) {
StringRef Name = Parser.getTok().getString();
- int RegNum = matchFn(Name);
+ MCRegister RegNum = matchFn(Name);
// GCC supports case insensitive register names. All of the VE registers
// are all lower case.
- if (RegNum == VE::NoRegister) {
+ if (!RegNum) {
RegNum = matchFn(Name.lower());
}
diff --git a/llvm/lib/Target/VE/VEInstrInfo.cpp b/llvm/lib/Target/VE/VEInstrInfo.cpp
index bae703b..b9ac5d6 100644
--- a/llvm/lib/Target/VE/VEInstrInfo.cpp
+++ b/llvm/lib/Target/VE/VEInstrInfo.cpp
@@ -459,7 +459,6 @@ void VEInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
DebugLoc DL;
@@ -519,10 +518,12 @@ void VEInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
report_fatal_error("Can't store this register to stack slot");
}
-void VEInstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
- int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void VEInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ Register DestReg, int FI,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
DebugLoc DL;
if (I != MBB.end())
DL = I->getDebugLoc();
diff --git a/llvm/lib/Target/VE/VEInstrInfo.h b/llvm/lib/Target/VE/VEInstrInfo.h
index 408d3ab..cedf7f2 100644
--- a/llvm/lib/Target/VE/VEInstrInfo.h
+++ b/llvm/lib/Target/VE/VEInstrInfo.h
@@ -92,13 +92,15 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
/// } Stack Spill & Reload
diff --git a/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp b/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
index d2e3527..9473e8d 100644
--- a/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
+++ b/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
@@ -387,8 +387,8 @@ void X86AvoidSFBPass::buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode,
MachineMemOperand *LMMO = *LoadInst->memoperands_begin();
MachineMemOperand *SMMO = *StoreInst->memoperands_begin();
- Register Reg1 = MRI->createVirtualRegister(
- TII->getRegClass(TII->get(NLoadOpcode), 0, TRI));
+ Register Reg1 =
+ MRI->createVirtualRegister(TII->getRegClass(TII->get(NLoadOpcode), 0));
MachineInstr *NewLoad =
BuildMI(*MBB, LoadInst, LoadInst->getDebugLoc(), TII->get(NLoadOpcode),
Reg1)
@@ -553,7 +553,7 @@ void X86AvoidSFBPass::findPotentiallylBlockedCopies(MachineFunction &MF) {
}
unsigned X86AvoidSFBPass::getRegSizeInBytes(MachineInstr *LoadInst) {
- const auto *TRC = TII->getRegClass(TII->get(LoadInst->getOpcode()), 0, TRI);
+ const auto *TRC = TII->getRegClass(TII->get(LoadInst->getOpcode()), 0);
return TRI->getRegSizeInBits(*TRC) / 8;
}
diff --git a/llvm/lib/Target/X86/X86DomainReassignment.cpp b/llvm/lib/Target/X86/X86DomainReassignment.cpp
index 5d19011..2047a53 100644
--- a/llvm/lib/Target/X86/X86DomainReassignment.cpp
+++ b/llvm/lib/Target/X86/X86DomainReassignment.cpp
@@ -174,8 +174,8 @@ public:
MachineBasicBlock *MBB = MI->getParent();
const DebugLoc &DL = MI->getDebugLoc();
- Register Reg = MRI->createVirtualRegister(
- TII->getRegClass(TII->get(DstOpcode), 0, MRI->getTargetRegisterInfo()));
+ Register Reg =
+ MRI->createVirtualRegister(TII->getRegClass(TII->get(DstOpcode), 0));
MachineInstrBuilder Bld = BuildMI(*MBB, MI, DL, TII->get(DstOpcode), Reg);
for (const MachineOperand &MO : llvm::drop_begin(MI->operands()))
Bld.add(MO);
diff --git a/llvm/lib/Target/X86/X86FastPreTileConfig.cpp b/llvm/lib/Target/X86/X86FastPreTileConfig.cpp
index 06f729a..25799f4 100644
--- a/llvm/lib/Target/X86/X86FastPreTileConfig.cpp
+++ b/llvm/lib/Target/X86/X86FastPreTileConfig.cpp
@@ -206,8 +206,7 @@ void X86FastPreTileConfig::spill(MachineBasicBlock::iterator Before,
const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
// Don't need shape information for tile store, becasue it is adjacent to
// the tile def instruction.
- TII->storeRegToStackSlot(*MBB, Before, VirtReg, Kill, FI, &RC, TRI,
- Register());
+ TII->storeRegToStackSlot(*MBB, Before, VirtReg, Kill, FI, &RC, Register());
++NumStores;
// TODO: update DBG_VALUEs
diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index a66a321..8bca634 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -3093,8 +3093,8 @@ bool X86FrameLowering::spillCalleeSavedRegisters(
MBB.addLiveIn(Reg);
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
- TII.storeRegToStackSlot(MBB, MI, Reg, true, I.getFrameIdx(), RC, TRI,
- Register(), MachineInstr::FrameSetup);
+ TII.storeRegToStackSlot(MBB, MI, Reg, true, I.getFrameIdx(), RC, Register(),
+ MachineInstr::FrameSetup);
}
return true;
@@ -3166,8 +3166,7 @@ bool X86FrameLowering::restoreCalleeSavedRegisters(
VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
- TII.loadRegFromStackSlot(MBB, MI, Reg, I.getFrameIdx(), RC, TRI,
- Register());
+ TII.loadRegFromStackSlot(MBB, MI, Reg, I.getFrameIdx(), RC, Register());
}
// Clear the stack slot for spill base pointer register.
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 5bce539..fa3dce2 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -53412,7 +53412,7 @@ static SDValue narrowBitOpRMW(StoreSDNode *St, const SDLoc &DL,
return SDValue();
// SrcVal must be a matching normal load further up the chain.
- auto *Ld = dyn_cast<LoadSDNode>(SrcVal);
+ auto *Ld = dyn_cast<LoadSDNode>(peekThroughBitcasts(SrcVal));
if (!Ld || !ISD::isNormalLoad(Ld) || !Ld->isSimple() ||
Ld->getBasePtr() != St->getBasePtr() ||
Ld->getOffset() != St->getOffset() ||
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 1b748b7..7056497 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -3161,6 +3161,12 @@ multiclass avx512_mask_setop_w<SDPatternOperator Val> {
defm KSET0 : avx512_mask_setop_w<immAllZerosV>;
defm KSET1 : avx512_mask_setop_w<immAllOnesV>;
+// 8-bit mask set operations for AVX512DQ
+let Predicates = [HasDQI] in {
+ defm KSET0B : avx512_mask_setop<VK8, v8i1, immAllZerosV>;
+ defm KSET1B : avx512_mask_setop<VK8, v8i1, immAllOnesV>;
+}
+
// With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
let Predicates = [HasAVX512] in {
def : Pat<(v8i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK8)>;
@@ -3173,6 +3179,34 @@ let Predicates = [HasAVX512] in {
def : Pat<(v1i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK1)>;
}
+// With AVX512DQ, use 8-bit operations for 8-bit masks to avoid setting upper
+// bits
+let Predicates = [HasDQI] in {
+ def : Pat<(v8i1 immAllZerosV), (KSET0B)>;
+ def : Pat<(v8i1 immAllOnesV), (KSET1B)>;
+}
+
+// Optimize bitconvert of all-ones constants to use kxnor instructions
+let Predicates = [HasDQI] in {
+ def : Pat<(v8i1(bitconvert(i8 255))), (KSET1B)>;
+ def : Pat<(v16i1(bitconvert(i16 255))), (COPY_TO_REGCLASS(KSET1B), VK16)>;
+}
+let Predicates = [HasBWI] in {
+ def : Pat<(v32i1(bitconvert(i32 -1))), (KSET1D)>;
+ def : Pat<(v64i1(bitconvert(i64 -1))), (KSET1Q)>;
+}
+// Submask patterns: lower N bits set in larger mask registers
+let Predicates = [HasBWI, HasDQI] in {
+ // v32i1 submasks
+ def : Pat<(v32i1(bitconvert(i32 255))), (COPY_TO_REGCLASS(KSET1B), VK32)>;
+ def : Pat<(v32i1(bitconvert(i32 65535))), (COPY_TO_REGCLASS(KSET1W), VK32)>;
+ // v64i1 submasks
+ def : Pat<(v64i1(bitconvert(i64 255))), (COPY_TO_REGCLASS(KSET1B), VK64)>;
+ def : Pat<(v64i1(bitconvert(i64 65535))), (COPY_TO_REGCLASS(KSET1W), VK64)>;
+ def : Pat<(v64i1(bitconvert(i64 4294967295))), (COPY_TO_REGCLASS(KSET1D),
+ VK64)>;
+}
+
// Patterns for kmask insert_subvector/extract_subvector to/from index=0
multiclass operation_subvector_mask_lowering<RegisterClass subRC, ValueType subVT,
RegisterClass RC, ValueType VT> {
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 2c6d1af..cb0208a 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -93,10 +93,9 @@ X86InstrInfo::X86InstrInfo(const X86Subtarget &STI)
X86::CATCHRET, (STI.is64Bit() ? X86::RET64 : X86::RET32)),
Subtarget(STI), RI(STI.getTargetTriple()) {}
-const TargetRegisterClass *
-X86InstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
- const TargetRegisterInfo *TRI) const {
- auto *RC = TargetInstrInfo::getRegClass(MCID, OpNum, TRI);
+const TargetRegisterClass *X86InstrInfo::getRegClass(const MCInstrDesc &MCID,
+ unsigned OpNum) const {
+ auto *RC = TargetInstrInfo::getRegClass(MCID, OpNum);
// If the target does not have egpr, then r16-r31 will be resereved for all
// instructions.
if (!RC || !Subtarget.hasEGPR())
@@ -789,9 +788,11 @@ bool X86InstrInfo::isReMaterializableImpl(
case X86::FsFLD0SS:
case X86::FsFLD0SH:
case X86::FsFLD0F128:
+ case X86::KSET0B:
case X86::KSET0D:
case X86::KSET0Q:
case X86::KSET0W:
+ case X86::KSET1B:
case X86::KSET1D:
case X86::KSET1Q:
case X86::KSET1W:
@@ -958,8 +959,7 @@ bool X86InstrInfo::isReMaterializableImpl(
void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register DestReg, unsigned SubIdx,
- const MachineInstr &Orig,
- const TargetRegisterInfo &TRI) const {
+ const MachineInstr &Orig) const {
bool ClobbersEFLAGS = Orig.modifiesRegister(X86::EFLAGS, &TRI);
if (ClobbersEFLAGS && MBB.computeRegisterLiveness(&TRI, X86::EFLAGS, I) !=
MachineBasicBlock::LQR_Dead) {
@@ -4782,14 +4782,14 @@ void X86InstrInfo::loadStoreTileReg(MachineBasicBlock &MBB,
void X86InstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
bool isKill, int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
- MachineInstr::MIFlag Flags) const {
+
+ Register VReg, MachineInstr::MIFlag Flags) const {
const MachineFunction &MF = *MBB.getParent();
const MachineFrameInfo &MFI = MF.getFrameInfo();
- assert(MFI.getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
+ assert(MFI.getObjectSize(FrameIdx) >= RI.getSpillSize(*RC) &&
"Stack slot too small for store");
- unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
+ unsigned Alignment = std::max<uint32_t>(RI.getSpillSize(*RC), 16);
bool isAligned =
(Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
(RI.canRealignStack(MF) && !MFI.isFixedObjectIndex(FrameIdx));
@@ -4803,15 +4803,17 @@ void X86InstrInfo::storeRegToStackSlot(
.setMIFlag(Flags);
}
-void X86InstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIdx, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ Register DestReg, int FrameIdx,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
const MachineFunction &MF = *MBB.getParent();
const MachineFrameInfo &MFI = MF.getFrameInfo();
- assert(MFI.getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
+ assert(MFI.getObjectSize(FrameIdx) >= RI.getSpillSize(*RC) &&
"Load size exceeds stack slot");
- unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
+ unsigned Alignment = std::max<uint32_t>(RI.getSpillSize(*RC), 16);
bool isAligned =
(Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
(RI.canRealignStack(MF) && !MFI.isFixedObjectIndex(FrameIdx));
@@ -5553,7 +5555,7 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
return false;
ShouldUpdateCC = true;
} else if (ImmDelta != 0) {
- unsigned BitWidth = TRI->getRegSizeInBits(*MRI->getRegClass(SrcReg));
+ unsigned BitWidth = RI.getRegSizeInBits(*MRI->getRegClass(SrcReg));
// Shift amount for min/max constants to adjust for 8/16/32 instruction
// sizes.
switch (OldCC) {
@@ -6352,12 +6354,16 @@ bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
// registers, since it is not usable as a write mask.
// FIXME: A more advanced approach would be to choose the best input mask
// register based on context.
+ case X86::KSET0B:
+ return Expand2AddrKreg(MIB, get(X86::KXORBkk), X86::K0);
case X86::KSET0W:
return Expand2AddrKreg(MIB, get(X86::KXORWkk), X86::K0);
case X86::KSET0D:
return Expand2AddrKreg(MIB, get(X86::KXORDkk), X86::K0);
case X86::KSET0Q:
return Expand2AddrKreg(MIB, get(X86::KXORQkk), X86::K0);
+ case X86::KSET1B:
+ return Expand2AddrKreg(MIB, get(X86::KXNORBkk), X86::K0);
case X86::KSET1W:
return Expand2AddrKreg(MIB, get(X86::KXNORWkk), X86::K0);
case X86::KSET1D:
@@ -7235,7 +7241,6 @@ static void updateOperandRegConstraints(MachineFunction &MF,
MachineInstr &NewMI,
const TargetInstrInfo &TII) {
MachineRegisterInfo &MRI = MF.getRegInfo();
- const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
for (int Idx : llvm::seq<int>(0, NewMI.getNumOperands())) {
MachineOperand &MO = NewMI.getOperand(Idx);
@@ -7247,7 +7252,7 @@ static void updateOperandRegConstraints(MachineFunction &MF,
continue;
auto *NewRC =
- MRI.constrainRegClass(Reg, TII.getRegClass(NewMI.getDesc(), Idx, &TRI));
+ MRI.constrainRegClass(Reg, TII.getRegClass(NewMI.getDesc(), Idx));
if (!NewRC) {
LLVM_DEBUG(
dbgs() << "WARNING: Unable to update register constraint for operand "
@@ -7345,7 +7350,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
unsigned SrcIdx = (Imm >> 6) & 3;
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
- const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI);
+ const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if ((Size == 0 || Size >= 16) && RCSize >= 16 &&
(MI.getOpcode() != X86::INSERTPSrri || Alignment >= Align(4))) {
@@ -7370,7 +7375,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
// TODO: In most cases AVX doesn't have a 8-byte alignment requirement.
if (OpNum == 2) {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
- const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI);
+ const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment >= Align(8)) {
unsigned NewOpCode =
@@ -7389,7 +7394,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
// table twice.
if (OpNum == 2) {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
- const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI);
+ const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment < Align(16)) {
MachineInstr *NewMI =
@@ -7524,7 +7529,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
bool NarrowToMOV32rm = false;
if (Size) {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
- const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI);
+ const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
// Check if it's safe to fold the load. If the size of the object is
// narrower than the load width, then it's not.
@@ -8118,9 +8123,9 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
RC == &X86::VK32WMRegClass || RC == &X86::VK64WMRegClass;
};
- if (Op1.isReg() && IsVKWMClass(getRegClass(MCID, 1, &RI)))
+ if (Op1.isReg() && IsVKWMClass(getRegClass(MCID, 1)))
MaskReg = Op1.getReg();
- else if (Op2.isReg() && IsVKWMClass(getRegClass(MCID, 2, &RI)))
+ else if (Op2.isReg() && IsVKWMClass(getRegClass(MCID, 2)))
MaskReg = Op2.getReg();
if (MaskReg) {
@@ -8524,7 +8529,7 @@ bool X86InstrInfo::unfoldMemoryOperand(
const MCInstrDesc &MCID = get(Opc);
- const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI);
+ const TargetRegisterClass *RC = getRegClass(MCID, Index);
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
// TODO: Check if 32-byte or greater accesses are slow too?
if (!MI.hasOneMemOperand() && RC == &X86::VR128RegClass &&
@@ -8635,7 +8640,7 @@ bool X86InstrInfo::unfoldMemoryOperand(
// Emit the store instruction.
if (UnfoldStore) {
- const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI);
+ const TargetRegisterClass *DstRC = getRegClass(MCID, 0);
auto MMOs = extractStoreMMOs(MI.memoperands(), MF);
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*DstRC), 16);
bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
@@ -8667,7 +8672,7 @@ bool X86InstrInfo::unfoldMemoryOperand(
const MCInstrDesc &MCID = get(Opc);
MachineFunction &MF = DAG.getMachineFunction();
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
- const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI);
+ const TargetRegisterClass *RC = getRegClass(MCID, Index);
unsigned NumDefs = MCID.NumDefs;
std::vector<SDValue> AddrOps;
std::vector<SDValue> BeforeOps;
@@ -8718,7 +8723,7 @@ bool X86InstrInfo::unfoldMemoryOperand(
std::vector<EVT> VTs;
const TargetRegisterClass *DstRC = nullptr;
if (MCID.getNumDefs() > 0) {
- DstRC = getRegClass(MCID, 0, &RI);
+ DstRC = getRegClass(MCID, 0);
VTs.push_back(*TRI.legalclasstypes_begin(*DstRC));
}
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index 5f75559..a547fcd 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -246,9 +246,8 @@ public:
/// GR*RegClass (definition in TD file)
/// ->
/// GR*_NOREX2RegClass (Returned register class)
- const TargetRegisterClass *
- getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
- const TargetRegisterInfo *TRI) const override;
+ const TargetRegisterClass *getRegClass(const MCInstrDesc &MCID,
+ unsigned OpNum) const override;
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
@@ -343,8 +342,7 @@ public:
bool isReMaterializableImpl(const MachineInstr &MI) const override;
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
Register DestReg, unsigned SubIdx,
- const MachineInstr &Orig,
- const TargetRegisterInfo &TRI) const override;
+ const MachineInstr &Orig) const override;
/// Given an operand within a MachineInstr, insert preceding code to put it
/// into the right format for a particular kind of LEA instruction. This may
@@ -469,14 +467,14 @@ public:
bool RenamableSrc = false) const override;
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadStoreTileReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
diff --git a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
index 167bed1..c964605 100644
--- a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -359,7 +359,7 @@ bool X86OptimizeLEAPass::chooseBestLEA(
// example MOV8mr_NOREX. We could constrain the register class of the LEA
// def to suit MI, however since this case is very rare and hard to
// reproduce in a test it's just more reliable to skip the LEA.
- if (TII->getRegClass(Desc, MemOpNo + X86::AddrBaseReg, TRI) !=
+ if (TII->getRegClass(Desc, MemOpNo + X86::AddrBaseReg) !=
MRI->getRegClass(DefMI->getOperand(0).getReg()))
continue;
diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index e0b3b61..829a32e 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -54,7 +54,6 @@
#include <cassert>
#include <iterator>
#include <optional>
-#include <utility>
using namespace llvm;
@@ -841,7 +840,7 @@ getRegClassForUnfoldedLoad(const X86InstrInfo &TII, unsigned Opcode) {
unsigned UnfoldedOpc = TII.getOpcodeAfterMemoryUnfold(
Opcode, /*UnfoldLoad*/ true, /*UnfoldStore*/ false, &Index);
const MCInstrDesc &MCID = TII.get(UnfoldedOpc);
- return TII.getRegClass(MCID, Index, &TII.getRegisterInfo());
+ return TII.getRegClass(MCID, Index);
}
void X86SpeculativeLoadHardeningPass::unfoldCallAndJumpLoads(
diff --git a/llvm/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp b/llvm/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp
index 096ad08..0e00db49 100644
--- a/llvm/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp
+++ b/llvm/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp
@@ -69,7 +69,7 @@ static bool readInstruction32(ArrayRef<uint8_t> Bytes, uint64_t Address,
return true;
}
-static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo) {
+static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo) {
const MCRegisterInfo *RegInfo = D->getContext().getRegisterInfo();
return RegInfo->getRegClass(RC).getRegister(RegNo);
}
@@ -79,7 +79,7 @@ static DecodeStatus DecodeGRRegsRegisterClass(MCInst &Inst, unsigned RegNo,
const MCDisassembler *Decoder) {
if (RegNo > 11)
return MCDisassembler::Fail;
- unsigned Reg = getReg(Decoder, XCore::GRRegsRegClassID, RegNo);
+ MCRegister Reg = getReg(Decoder, XCore::GRRegsRegClassID, RegNo);
Inst.addOperand(MCOperand::createReg(Reg));
return MCDisassembler::Success;
}
@@ -89,7 +89,7 @@ static DecodeStatus DecodeRRegsRegisterClass(MCInst &Inst, unsigned RegNo,
const MCDisassembler *Decoder) {
if (RegNo > 15)
return MCDisassembler::Fail;
- unsigned Reg = getReg(Decoder, XCore::RRegsRegClassID, RegNo);
+ MCRegister Reg = getReg(Decoder, XCore::RRegsRegClassID, RegNo);
Inst.addOperand(MCOperand::createReg(Reg));
return MCDisassembler::Success;
}
diff --git a/llvm/lib/Target/XCore/XCoreFrameLowering.cpp b/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
index cdb5186..351a221 100644
--- a/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -432,7 +432,7 @@ bool XCoreFrameLowering::spillCalleeSavedRegisters(
// Add the callee-saved register as live-in. It's killed at the spill.
MBB.addLiveIn(Reg);
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.storeRegToStackSlot(MBB, MI, Reg, true, I.getFrameIdx(), RC, TRI,
+ TII.storeRegToStackSlot(MBB, MI, Reg, true, I.getFrameIdx(), RC,
Register());
if (emitFrameMoves) {
auto Store = MI;
@@ -458,8 +458,7 @@ bool XCoreFrameLowering::restoreCalleeSavedRegisters(
"LR & FP are always handled in emitEpilogue");
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
- TII.loadRegFromStackSlot(MBB, MI, Reg, CSR.getFrameIdx(), RC, TRI,
- Register());
+ TII.loadRegFromStackSlot(MBB, MI, Reg, CSR.getFrameIdx(), RC, Register());
assert(MI != MBB.begin() &&
"loadRegFromStackSlot didn't insert any code!");
// Insert in reverse order. loadRegFromStackSlot can insert multiple
diff --git a/llvm/lib/Target/XCore/XCoreInstrInfo.cpp b/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
index 80fda34..075910c 100644
--- a/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
+++ b/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
@@ -355,8 +355,8 @@ void XCoreInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
void XCoreInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register SrcReg,
bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
- MachineInstr::MIFlag Flags) const {
+
+ Register VReg, MachineInstr::MIFlag Flags) const {
DebugLoc DL;
if (I != MBB.end() && !I->isDebugInstr())
DL = I->getDebugLoc();
@@ -377,7 +377,6 @@ void XCoreInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register DestReg, int FrameIndex,
const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
Register VReg,
MachineInstr::MIFlag Flags) const {
DebugLoc DL;
diff --git a/llvm/lib/Target/XCore/XCoreInstrInfo.h b/llvm/lib/Target/XCore/XCoreInstrInfo.h
index 3543392..c4e399e 100644
--- a/llvm/lib/Target/XCore/XCoreInstrInfo.h
+++ b/llvm/lib/Target/XCore/XCoreInstrInfo.h
@@ -71,13 +71,15 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
bool reverseBranchCondition(
diff --git a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp
index cf9a2a0..1c0dc66 100644
--- a/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp
+++ b/llvm/lib/Target/Xtensa/XtensaFrameLowering.cpp
@@ -314,7 +314,7 @@ bool XtensaFrameLowering::spillCalleeSavedRegisters(
bool IsKill = !IsA0AndRetAddrIsTaken;
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
TII.storeRegToStackSlot(EntryBlock, MI, Reg, IsKill, CSI[i].getFrameIdx(),
- RC, TRI, Register());
+ RC, Register());
}
return true;
diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp
index 6bbebde..d7b05ac 100644
--- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp
+++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp
@@ -145,8 +145,8 @@ void XtensaInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
void XtensaInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
bool isKill, int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
- MachineInstr::MIFlag Flags) const {
+
+ Register VReg, MachineInstr::MIFlag Flags) const {
DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
unsigned LoadOpcode, StoreOpcode;
getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode, FrameIdx);
@@ -155,10 +155,12 @@ void XtensaInstrInfo::storeRegToStackSlot(
addFrameReference(MIB, FrameIdx);
}
-void XtensaInstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg,
- int FrameIdx, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void XtensaInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ Register DestReg, int FrameIdx,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
unsigned LoadOpcode, StoreOpcode;
getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode, FrameIdx);
@@ -544,12 +546,12 @@ void XtensaInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
"function code size is significantly larger than estimated");
storeRegToStackSlot(MBB, L32R, ScavRegister, /*IsKill=*/true, FrameIndex,
- &Xtensa::ARRegClass, &RI, Register());
+ &Xtensa::ARRegClass, Register());
RI.eliminateFrameIndex(std::prev(L32R.getIterator()),
/*SpAdj=*/0, /*FIOperandNum=*/1);
loadRegFromStackSlot(RestoreBB, RestoreBB.end(), ScavRegister, FrameIndex,
- &Xtensa::ARRegClass, &RI, Register());
+ &Xtensa::ARRegClass, Register());
RI.eliminateFrameIndex(RestoreBB.back(),
/*SpAdj=*/0, /*FIOperandNum=*/1);
JumpToMBB = &RestoreBB;
diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.h b/llvm/lib/Target/Xtensa/XtensaInstrInfo.h
index 1808cb3..0b46d6c 100644
--- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.h
+++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.h
@@ -56,14 +56,13 @@ public:
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
Register DestReg, int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
// Get the load and store opcodes for a given register class and offset.