Diffstat (limited to 'llvm/lib/Target/X86')
-rw-r--r--  llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp |  6
-rw-r--r--  llvm/lib/Target/X86/X86DomainReassignment.cpp         |  4
-rw-r--r--  llvm/lib/Target/X86/X86FastPreTileConfig.cpp          |  3
-rw-r--r--  llvm/lib/Target/X86/X86FrameLowering.cpp              |  7
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp               |  2
-rw-r--r--  llvm/lib/Target/X86/X86InstrAVX512.td                 | 34
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp                  | 63
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.h                    | 14
-rw-r--r--  llvm/lib/Target/X86/X86OptimizeLEAs.cpp               |  2
-rw-r--r--  llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp   |  3
10 files changed, 86 insertions, 52 deletions
diff --git a/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp b/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
index d2e3527..9473e8d 100644
--- a/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
+++ b/llvm/lib/Target/X86/X86AvoidStoreForwardingBlocks.cpp
@@ -387,8 +387,8 @@ void X86AvoidSFBPass::buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode,
MachineMemOperand *LMMO = *LoadInst->memoperands_begin();
MachineMemOperand *SMMO = *StoreInst->memoperands_begin();
- Register Reg1 = MRI->createVirtualRegister(
- TII->getRegClass(TII->get(NLoadOpcode), 0, TRI));
+ Register Reg1 =
+ MRI->createVirtualRegister(TII->getRegClass(TII->get(NLoadOpcode), 0));
MachineInstr *NewLoad =
BuildMI(*MBB, LoadInst, LoadInst->getDebugLoc(), TII->get(NLoadOpcode),
Reg1)
@@ -553,7 +553,7 @@ void X86AvoidSFBPass::findPotentiallylBlockedCopies(MachineFunction &MF) {
}
unsigned X86AvoidSFBPass::getRegSizeInBytes(MachineInstr *LoadInst) {
- const auto *TRC = TII->getRegClass(TII->get(LoadInst->getOpcode()), 0, TRI);
+ const auto *TRC = TII->getRegClass(TII->get(LoadInst->getOpcode()), 0);
return TRI->getRegSizeInBits(*TRC) / 8;
}
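The change above is the pattern that repeats throughout this commit: X86's getRegClass override no longer takes a TargetRegisterInfo *, since the instruction info can reach the register info through its own Subtarget and RI members. A minimal caller-side sketch of the new call shape (the helper name and opcode parameter are illustrative, not part of the patch):

```cpp
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
using namespace llvm;

// Illustrative helper, not from the patch: create a virtual register
// suitable for the def (operand 0) of Opcode, using the TRI-free
// getRegClass() overload this commit switches to.
static Register createVRegForDef(const TargetInstrInfo &TII,
                                 MachineRegisterInfo &MRI, unsigned Opcode) {
  const TargetRegisterClass *RC =
      TII.getRegClass(TII.get(Opcode), /*OpNum=*/0);
  return MRI.createVirtualRegister(RC);
}
```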
diff --git a/llvm/lib/Target/X86/X86DomainReassignment.cpp b/llvm/lib/Target/X86/X86DomainReassignment.cpp
index 5d19011..2047a53 100644
--- a/llvm/lib/Target/X86/X86DomainReassignment.cpp
+++ b/llvm/lib/Target/X86/X86DomainReassignment.cpp
@@ -174,8 +174,8 @@ public:
MachineBasicBlock *MBB = MI->getParent();
const DebugLoc &DL = MI->getDebugLoc();
- Register Reg = MRI->createVirtualRegister(
- TII->getRegClass(TII->get(DstOpcode), 0, MRI->getTargetRegisterInfo()));
+ Register Reg =
+ MRI->createVirtualRegister(TII->getRegClass(TII->get(DstOpcode), 0));
MachineInstrBuilder Bld = BuildMI(*MBB, MI, DL, TII->get(DstOpcode), Reg);
for (const MachineOperand &MO : llvm::drop_begin(MI->operands()))
Bld.add(MO);
diff --git a/llvm/lib/Target/X86/X86FastPreTileConfig.cpp b/llvm/lib/Target/X86/X86FastPreTileConfig.cpp
index 06f729a..25799f4 100644
--- a/llvm/lib/Target/X86/X86FastPreTileConfig.cpp
+++ b/llvm/lib/Target/X86/X86FastPreTileConfig.cpp
@@ -206,8 +206,7 @@ void X86FastPreTileConfig::spill(MachineBasicBlock::iterator Before,
const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
// Don't need shape information for tile store, because it is adjacent to
// the tile def instruction.
- TII->storeRegToStackSlot(*MBB, Before, VirtReg, Kill, FI, &RC, TRI,
- Register());
+ TII->storeRegToStackSlot(*MBB, Before, VirtReg, Kill, FI, &RC, Register());
++NumStores;
// TODO: update DBG_VALUEs
diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index a66a321..8bca634 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -3093,8 +3093,8 @@ bool X86FrameLowering::spillCalleeSavedRegisters(
MBB.addLiveIn(Reg);
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
- TII.storeRegToStackSlot(MBB, MI, Reg, true, I.getFrameIdx(), RC, TRI,
- Register(), MachineInstr::FrameSetup);
+ TII.storeRegToStackSlot(MBB, MI, Reg, true, I.getFrameIdx(), RC, Register(),
+ MachineInstr::FrameSetup);
}
return true;
@@ -3166,8 +3166,7 @@ bool X86FrameLowering::restoreCalleeSavedRegisters(
VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
- TII.loadRegFromStackSlot(MBB, MI, Reg, I.getFrameIdx(), RC, TRI,
- Register());
+ TII.loadRegFromStackSlot(MBB, MI, Reg, I.getFrameIdx(), RC, Register());
}
// Clear the stack slot for spill base pointer register.
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 5bce539..fa3dce2 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -53412,7 +53412,7 @@ static SDValue narrowBitOpRMW(StoreSDNode *St, const SDLoc &DL,
return SDValue();
// SrcVal must be a matching normal load further up the chain.
- auto *Ld = dyn_cast<LoadSDNode>(SrcVal);
+ auto *Ld = dyn_cast<LoadSDNode>(peekThroughBitcasts(SrcVal));
if (!Ld || !ISD::isNormalLoad(Ld) || !Ld->isSimple() ||
Ld->getBasePtr() != St->getBasePtr() ||
Ld->getOffset() != St->getOffset() ||
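Previously the match failed when the stored value reached the load only through a bitcast (the value loaded as one type and stored back as another). Stripping bitcasts first lets the dyn_cast<LoadSDNode> see the underlying load. Roughly what peekThroughBitcasts does, as a simplified sketch rather than the library's exact implementation:

```cpp
#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

// Simplified sketch of llvm::peekThroughBitcasts (not the exact library
// code): walk down through any chain of BITCAST nodes to the source value.
static SDValue lookThroughBitcastsSketch(SDValue V) {
  while (V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);
  return V;
}
```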
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 1b748b7..7056497 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -3161,6 +3161,12 @@ multiclass avx512_mask_setop_w<SDPatternOperator Val> {
defm KSET0 : avx512_mask_setop_w<immAllZerosV>;
defm KSET1 : avx512_mask_setop_w<immAllOnesV>;
+// 8-bit mask set operations for AVX512DQ
+let Predicates = [HasDQI] in {
+ defm KSET0B : avx512_mask_setop<VK8, v8i1, immAllZerosV>;
+ defm KSET1B : avx512_mask_setop<VK8, v8i1, immAllOnesV>;
+}
+
// With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
let Predicates = [HasAVX512] in {
def : Pat<(v8i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK8)>;
@@ -3173,6 +3179,34 @@ let Predicates = [HasAVX512] in {
def : Pat<(v1i1 immAllOnesV), (COPY_TO_REGCLASS (KSET1W), VK1)>;
}
+// With AVX512DQ, use 8-bit operations for 8-bit masks to avoid setting upper
+// bits
+let Predicates = [HasDQI] in {
+ def : Pat<(v8i1 immAllZerosV), (KSET0B)>;
+ def : Pat<(v8i1 immAllOnesV), (KSET1B)>;
+}
+
+// Optimize bitconvert of all-ones constants to use kxnor instructions
+let Predicates = [HasDQI] in {
+ def : Pat<(v8i1(bitconvert(i8 255))), (KSET1B)>;
+ def : Pat<(v16i1(bitconvert(i16 255))), (COPY_TO_REGCLASS(KSET1B), VK16)>;
+}
+let Predicates = [HasBWI] in {
+ def : Pat<(v32i1(bitconvert(i32 -1))), (KSET1D)>;
+ def : Pat<(v64i1(bitconvert(i64 -1))), (KSET1Q)>;
+}
+// Submask patterns: lower N bits set in larger mask registers
+let Predicates = [HasBWI, HasDQI] in {
+ // v32i1 submasks
+ def : Pat<(v32i1(bitconvert(i32 255))), (COPY_TO_REGCLASS(KSET1B), VK32)>;
+ def : Pat<(v32i1(bitconvert(i32 65535))), (COPY_TO_REGCLASS(KSET1W), VK32)>;
+ // v64i1 submasks
+ def : Pat<(v64i1(bitconvert(i64 255))), (COPY_TO_REGCLASS(KSET1B), VK64)>;
+ def : Pat<(v64i1(bitconvert(i64 65535))), (COPY_TO_REGCLASS(KSET1W), VK64)>;
+ def : Pat<(v64i1(bitconvert(i64 4294967295))), (COPY_TO_REGCLASS(KSET1D),
+ VK64)>;
+}
+
// Patterns for kmask insert_subvector/extract_subvector to/from index=0
multiclass operation_subvector_mask_lowering<RegisterClass subRC, ValueType subVT,
RegisterClass RC, ValueType VT> {
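The net effect of the new KSET0B/KSET1B patterns: an all-zeros or all-ones v8i1 (and the listed i8/i16/i32/i64 submask constants) can be materialized with a byte-wide kxorb/kxnorb that leaves the upper mask bits clear, instead of being promoted to the 16-bit kxorw/kxnorw. A hedged source-level illustration (not from the patch; a compiler may also fold the constant mask away entirely):

```cpp
#include <immintrin.h>

// With AVX512DQ, the all-ones 8-bit mask below can be materialized with
// kxnorb rather than the wider kxnorw, matching (v8i1 immAllOnesV) above.
__m512d keep_all_lanes(__m512d a, __m512d b) {
  __mmask8 m = 0xFF; // v8i1 all-ones
  return _mm512_mask_mov_pd(b, m, a);
}
```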
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 2c6d1af..cb0208a 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -93,10 +93,9 @@ X86InstrInfo::X86InstrInfo(const X86Subtarget &STI)
X86::CATCHRET, (STI.is64Bit() ? X86::RET64 : X86::RET32)),
Subtarget(STI), RI(STI.getTargetTriple()) {}
-const TargetRegisterClass *
-X86InstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
- const TargetRegisterInfo *TRI) const {
- auto *RC = TargetInstrInfo::getRegClass(MCID, OpNum, TRI);
+const TargetRegisterClass *X86InstrInfo::getRegClass(const MCInstrDesc &MCID,
+ unsigned OpNum) const {
+ auto *RC = TargetInstrInfo::getRegClass(MCID, OpNum);
// If the target does not have egpr, then r16-r31 will be reserved for all
// instructions.
if (!RC || !Subtarget.hasEGPR())
@@ -789,9 +788,11 @@ bool X86InstrInfo::isReMaterializableImpl(
case X86::FsFLD0SS:
case X86::FsFLD0SH:
case X86::FsFLD0F128:
+ case X86::KSET0B:
case X86::KSET0D:
case X86::KSET0Q:
case X86::KSET0W:
+ case X86::KSET1B:
case X86::KSET1D:
case X86::KSET1Q:
case X86::KSET1W:
@@ -958,8 +959,7 @@ bool X86InstrInfo::isReMaterializableImpl(
void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register DestReg, unsigned SubIdx,
- const MachineInstr &Orig,
- const TargetRegisterInfo &TRI) const {
+ const MachineInstr &Orig) const {
- bool ClobbersEFLAGS = Orig.modifiesRegister(X86::EFLAGS, &TRI);
- if (ClobbersEFLAGS && MBB.computeRegisterLiveness(&TRI, X86::EFLAGS, I) !=
+ bool ClobbersEFLAGS = Orig.modifiesRegister(X86::EFLAGS, &RI);
+ if (ClobbersEFLAGS && MBB.computeRegisterLiveness(&RI, X86::EFLAGS, I) !=
MachineBasicBlock::LQR_Dead) {
@@ -4782,14 +4782,14 @@ void X86InstrInfo::loadStoreTileReg(MachineBasicBlock &MBB,
void X86InstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
bool isKill, int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
- MachineInstr::MIFlag Flags) const {
+ Register VReg, MachineInstr::MIFlag Flags) const {
const MachineFunction &MF = *MBB.getParent();
const MachineFrameInfo &MFI = MF.getFrameInfo();
- assert(MFI.getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
+ assert(MFI.getObjectSize(FrameIdx) >= RI.getSpillSize(*RC) &&
"Stack slot too small for store");
- unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
+ unsigned Alignment = std::max<uint32_t>(RI.getSpillSize(*RC), 16);
bool isAligned =
(Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
(RI.canRealignStack(MF) && !MFI.isFixedObjectIndex(FrameIdx));
@@ -4803,15 +4803,17 @@ void X86InstrInfo::storeRegToStackSlot(
.setMIFlag(Flags);
}
-void X86InstrInfo::loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIdx, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ Register DestReg, int FrameIdx,
+ const TargetRegisterClass *RC,
+ Register VReg,
+ MachineInstr::MIFlag Flags) const {
const MachineFunction &MF = *MBB.getParent();
const MachineFrameInfo &MFI = MF.getFrameInfo();
- assert(MFI.getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
+ assert(MFI.getObjectSize(FrameIdx) >= RI.getSpillSize(*RC) &&
"Load size exceeds stack slot");
- unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
+ unsigned Alignment = std::max<uint32_t>(RI.getSpillSize(*RC), 16);
bool isAligned =
(Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
(RI.canRealignStack(MF) && !MFI.isFixedObjectIndex(FrameIdx));
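Both spill helpers share the same alignment reasoning: the stack slot must hold RI.getSpillSize(*RC) bytes, and the access counts as aligned only if the stack is already aligned to max(spillSize, 16) or the function may realign it. A standalone restatement of that predicate (a simplified sketch, not the LLVM code itself; the values in the trailing comment are illustrative):

```cpp
#include <algorithm>
#include <cstdint>

// Mirrors the isAligned computation in storeRegToStackSlot /
// loadRegFromStackSlot above.
static bool isAlignedSpill(uint32_t SpillSize, uint32_t StackAlign,
                           bool CanRealignStack, bool IsFixedObject) {
  uint32_t Alignment = std::max<uint32_t>(SpillSize, 16);
  return StackAlign >= Alignment || (CanRealignStack && !IsFixedObject);
}
// e.g. a 64-byte ZMM spill on a 16-byte-aligned stack counts as aligned only
// when the frame can be realigned: isAlignedSpill(64, 16, true, false) == true.
```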
@@ -5553,7 +5555,7 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
return false;
ShouldUpdateCC = true;
} else if (ImmDelta != 0) {
- unsigned BitWidth = TRI->getRegSizeInBits(*MRI->getRegClass(SrcReg));
+ unsigned BitWidth = RI.getRegSizeInBits(*MRI->getRegClass(SrcReg));
// Shift amount for min/max constants to adjust for 8/16/32 instruction
// sizes.
switch (OldCC) {
@@ -6352,12 +6354,16 @@ bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
// registers, since it is not usable as a write mask.
// FIXME: A more advanced approach would be to choose the best input mask
// register based on context.
+ case X86::KSET0B:
+ return Expand2AddrKreg(MIB, get(X86::KXORBkk), X86::K0);
case X86::KSET0W:
return Expand2AddrKreg(MIB, get(X86::KXORWkk), X86::K0);
case X86::KSET0D:
return Expand2AddrKreg(MIB, get(X86::KXORDkk), X86::K0);
case X86::KSET0Q:
return Expand2AddrKreg(MIB, get(X86::KXORQkk), X86::K0);
+ case X86::KSET1B:
+ return Expand2AddrKreg(MIB, get(X86::KXNORBkk), X86::K0);
case X86::KSET1W:
return Expand2AddrKreg(MIB, get(X86::KXNORWkk), X86::K0);
case X86::KSET1D:
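KSET0B/KSET1B are post-RA pseudos, so they reuse the existing two-address k-register expansion: XOR of a register with itself yields all-zeros, XNOR yields all-ones, and K0 is a safe input because it is unusable as a write mask anyway. The helper's body is outside this hunk; a hedged reconstruction of its likely shape:

```cpp
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include <cassert>
using namespace llvm;

// Assumed shape of the Expand2AddrKreg helper called above (its definition is
// not part of this hunk): retarget the pseudo to the real opcode and feed it
// Reg twice, e.g. "KSET1B %kN" becomes "%kN = KXNORBkk undef %k0, undef %k0".
static bool Expand2AddrKreg(MachineInstrBuilder &MIB, const MCInstrDesc &Desc,
                            Register Reg) {
  assert(Desc.getNumOperands() == 3 && "Expected two-address instruction.");
  MIB->setDesc(Desc);
  MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
  return true;
}
```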
@@ -7235,7 +7241,6 @@ static void updateOperandRegConstraints(MachineFunction &MF,
MachineInstr &NewMI,
const TargetInstrInfo &TII) {
MachineRegisterInfo &MRI = MF.getRegInfo();
- const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
for (int Idx : llvm::seq<int>(0, NewMI.getNumOperands())) {
MachineOperand &MO = NewMI.getOperand(Idx);
@@ -7247,7 +7252,7 @@ static void updateOperandRegConstraints(MachineFunction &MF,
continue;
auto *NewRC =
- MRI.constrainRegClass(Reg, TII.getRegClass(NewMI.getDesc(), Idx, &TRI));
+ MRI.constrainRegClass(Reg, TII.getRegClass(NewMI.getDesc(), Idx));
if (!NewRC) {
LLVM_DEBUG(
dbgs() << "WARNING: Unable to update register constraint for operand "
@@ -7345,7 +7350,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
unsigned SrcIdx = (Imm >> 6) & 3;
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
- const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI);
+ const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if ((Size == 0 || Size >= 16) && RCSize >= 16 &&
(MI.getOpcode() != X86::INSERTPSrri || Alignment >= Align(4))) {
@@ -7370,7 +7375,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
// TODO: In most cases AVX doesn't have a 8-byte alignment requirement.
if (OpNum == 2) {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
- const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI);
+ const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment >= Align(8)) {
unsigned NewOpCode =
@@ -7389,7 +7394,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
// table twice.
if (OpNum == 2) {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
- const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI);
+ const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment < Align(16)) {
MachineInstr *NewMI =
@@ -7524,7 +7529,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
bool NarrowToMOV32rm = false;
if (Size) {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
- const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI);
+ const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
// Check if it's safe to fold the load. If the size of the object is
// narrower than the load width, then it's not.
@@ -8118,9 +8123,9 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
RC == &X86::VK32WMRegClass || RC == &X86::VK64WMRegClass;
};
- if (Op1.isReg() && IsVKWMClass(getRegClass(MCID, 1, &RI)))
+ if (Op1.isReg() && IsVKWMClass(getRegClass(MCID, 1)))
MaskReg = Op1.getReg();
- else if (Op2.isReg() && IsVKWMClass(getRegClass(MCID, 2, &RI)))
+ else if (Op2.isReg() && IsVKWMClass(getRegClass(MCID, 2)))
MaskReg = Op2.getReg();
if (MaskReg) {
@@ -8524,7 +8529,7 @@ bool X86InstrInfo::unfoldMemoryOperand(
const MCInstrDesc &MCID = get(Opc);
- const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI);
+ const TargetRegisterClass *RC = getRegClass(MCID, Index);
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
// TODO: Check if 32-byte or greater accesses are slow too?
if (!MI.hasOneMemOperand() && RC == &X86::VR128RegClass &&
@@ -8635,7 +8640,7 @@ bool X86InstrInfo::unfoldMemoryOperand(
// Emit the store instruction.
if (UnfoldStore) {
- const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI);
+ const TargetRegisterClass *DstRC = getRegClass(MCID, 0);
auto MMOs = extractStoreMMOs(MI.memoperands(), MF);
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*DstRC), 16);
bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
@@ -8667,7 +8672,7 @@ bool X86InstrInfo::unfoldMemoryOperand(
const MCInstrDesc &MCID = get(Opc);
MachineFunction &MF = DAG.getMachineFunction();
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
- const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI);
+ const TargetRegisterClass *RC = getRegClass(MCID, Index);
unsigned NumDefs = MCID.NumDefs;
std::vector<SDValue> AddrOps;
std::vector<SDValue> BeforeOps;
@@ -8718,7 +8723,7 @@ bool X86InstrInfo::unfoldMemoryOperand(
std::vector<EVT> VTs;
const TargetRegisterClass *DstRC = nullptr;
if (MCID.getNumDefs() > 0) {
- DstRC = getRegClass(MCID, 0, &RI);
+ DstRC = getRegClass(MCID, 0);
VTs.push_back(*TRI.legalclasstypes_begin(*DstRC));
}
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index 5f75559..a547fcd 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -246,9 +246,8 @@ public:
/// GR*RegClass (definition in TD file)
/// ->
/// GR*_NOREX2RegClass (Returned register class)
- const TargetRegisterClass *
- getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
- const TargetRegisterInfo *TRI) const override;
+ const TargetRegisterClass *getRegClass(const MCInstrDesc &MCID,
+ unsigned OpNum) const override;
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
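A hedged usage sketch of the narrowing documented above (the opcode and class names are illustrative, not taken from the patch): on a subtarget without EGPR, querying the def class of a plain GPR instruction yields the _NOREX2 variant, so the allocator can never hand it r16-r31.

```cpp
#include "X86InstrInfo.h"
using namespace llvm;

// Illustrative only: GR32 (TD definition) is narrowed to GR32_NOREX2 when
// !Subtarget.hasEGPR(), per the doc comment above.
static const TargetRegisterClass *defClassSketch(const X86InstrInfo &TII) {
  return TII.getRegClass(TII.get(X86::MOV32rm), /*OpNum=*/0);
}
```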
@@ -343,8 +342,7 @@ public:
bool isReMaterializableImpl(const MachineInstr &MI) const override;
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
Register DestReg, unsigned SubIdx,
- const MachineInstr &Orig,
- const TargetRegisterInfo &TRI) const override;
+ const MachineInstr &Orig) const override;
/// Given an operand within a MachineInstr, insert preceding code to put it
/// into the right format for a particular kind of LEA instruction. This may
@@ -469,14 +467,14 @@ public:
bool RenamableSrc = false) const override;
void storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
+ Register VReg,
MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
void loadStoreTileReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
diff --git a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
index 167bed1..c964605 100644
--- a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -359,7 +359,7 @@ bool X86OptimizeLEAPass::chooseBestLEA(
// example MOV8mr_NOREX. We could constrain the register class of the LEA
// def to suit MI, however since this case is very rare and hard to
// reproduce in a test it's just more reliable to skip the LEA.
- if (TII->getRegClass(Desc, MemOpNo + X86::AddrBaseReg, TRI) !=
+ if (TII->getRegClass(Desc, MemOpNo + X86::AddrBaseReg) !=
MRI->getRegClass(DefMI->getOperand(0).getReg()))
continue;
diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index e0b3b61..829a32e 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -54,7 +54,6 @@
#include <cassert>
#include <iterator>
#include <optional>
-#include <utility>
using namespace llvm;
@@ -841,7 +840,7 @@ getRegClassForUnfoldedLoad(const X86InstrInfo &TII, unsigned Opcode) {
unsigned UnfoldedOpc = TII.getOpcodeAfterMemoryUnfold(
Opcode, /*UnfoldLoad*/ true, /*UnfoldStore*/ false, &Index);
const MCInstrDesc &MCID = TII.get(UnfoldedOpc);
- return TII.getRegClass(MCID, Index, &TII.getRegisterInfo());
+ return TII.getRegClass(MCID, Index);
}
void X86SpeculativeLoadHardeningPass::unfoldCallAndJumpLoads(