path: root/llvm/lib/Target
Diffstat (limited to 'llvm/lib/Target')
 llvm/lib/Target/AArch64/AArch64Combine.td                |  2
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp          | 83
 llvm/lib/Target/AArch64/AArch64ISelLowering.h            |  3
 llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp          | 91
 llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp   | 34
 llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h     |  1
 llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp    | 22
 llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h      |  1
 llvm/lib/Target/AMDGPU/GCNRegPressure.cpp                |  8
 llvm/lib/Target/AMDGPU/GCNRegPressure.h                  |  4
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp                | 32
 llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp             | 12
 llvm/lib/Target/ARM/ARMISelLowering.cpp                  |  6
 llvm/lib/Target/ARM/ARMISelLowering.h                    |  3
 llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp         |  2
 llvm/lib/Target/Mips/MipsISelLowering.cpp                |  2
 llvm/lib/Target/Mips/MipsISelLowering.h                  |  3
 llvm/lib/Target/Mips/MipsInstrInfo.td                    | 16
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp              | 33
 llvm/lib/Target/PowerPC/PPCISelLowering.h                |  1
 llvm/lib/Target/RISCV/RISCVFeatures.td                   |  2
 llvm/lib/Target/RISCV/RISCVInstrInfo.td                  |  7
 llvm/lib/Target/RISCV/RISCVInstrInfoD.td                 | 73
 llvm/lib/Target/RISCV/RISCVInstrInfoF.td                 | 21
 llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td              | 23
 llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td               |  8
 llvm/lib/Target/SPIRV/CMakeLists.txt                     |  1
 llvm/lib/Target/SPIRV/SPIRVCombine.td                    |  4
 llvm/lib/Target/SPIRV/SPIRVCombinerHelper.cpp            | 60
 llvm/lib/Target/SPIRV/SPIRVCombinerHelper.h              | 38
 llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp       |  1
 llvm/lib/Target/SPIRV/SPIRVMergeRegionExitTargets.cpp    |  1
 llvm/lib/Target/SPIRV/SPIRVPreLegalizerCombiner.cpp      | 77
 llvm/lib/Target/SPIRV/SPIRVStripConvergentIntrinsics.cpp |  1
 llvm/lib/Target/SPIRV/SPIRVStructurizer.cpp              |  1
 llvm/lib/Target/X86/X86ISelLowering.cpp                  |  5
 llvm/lib/Target/X86/X86ISelLowering.h                    |  3
 llvm/lib/Target/X86/X86ISelLoweringCall.cpp              |  2
 38 files changed, 438 insertions(+), 249 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
index 639ddcb..ecaeff7 100644
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -350,7 +350,7 @@ def AArch64PostLegalizerLowering
// Post-legalization combines which are primarily optimizations.
def AArch64PostLegalizerCombiner
: GICombiner<"AArch64PostLegalizerCombinerImpl",
- [copy_prop, cast_of_cast_combines,
+ [copy_prop, cast_of_cast_combines, constant_fold_fp_ops,
buildvector_of_truncate, integer_of_truncate,
mutate_anyext_to_zext, combines_for_extload,
combine_indexed_load_store, sext_trunc_sextload,
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 31b3d18..fbce3b0 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -16249,7 +16249,9 @@ SDValue AArch64TargetLowering::LowerDIV(SDValue Op, SelectionDAG &DAG) const {
bool Negated;
uint64_t SplatVal;
- if (Signed && isPow2Splat(Op.getOperand(1), SplatVal, Negated)) {
+ // NOTE: SRAD cannot be used to represent sdiv-by-one.
+ if (Signed && isPow2Splat(Op.getOperand(1), SplatVal, Negated) &&
+ SplatVal > 1) {
SDValue Pg = getPredicateForScalableVector(DAG, DL, VT);
SDValue Res =
DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, DL, VT, Pg, Op->getOperand(0),
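[Annotation] The reason for the new guard, as I read it: SVE's ASRD immediate encodes shift amounts starting at 1, so a splat divisor of 1 (shift amount 0) has no SRAD encoding and must fall through to the generic path. A minimal sketch of the check added here and again in LowerFixedLengthVectorIntDivideToSVE below (hypothetical helper, not part of the patch; isPowerOf2_64 is from llvm/Support/MathExtras.h):

    // Hedged sketch mirroring the SplatVal > 1 guard above.
    static bool canLowerSDivToSRAD(uint64_t SplatVal) {
      // A divisor of 1 would need a shift of 0, which ASRD cannot encode.
      return isPowerOf2_64(SplatVal) && SplatVal > 1;
    }
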
@@ -18638,7 +18640,7 @@ bool AArch64TargetLowering::isDesirableToCommuteXorWithShift(
}
bool AArch64TargetLowering::shouldFoldConstantShiftPairToMask(
- const SDNode *N, CombineLevel Level) const {
+ const SDNode *N) const {
assert(((N->getOpcode() == ISD::SHL &&
N->getOperand(0).getOpcode() == ISD::SRL) ||
(N->getOpcode() == ISD::SRL &&
@@ -30034,7 +30036,9 @@ SDValue AArch64TargetLowering::LowerFixedLengthVectorIntDivideToSVE(
bool Negated;
uint64_t SplatVal;
- if (Signed && isPow2Splat(Op.getOperand(1), SplatVal, Negated)) {
+ // NOTE: SRAD cannot be used to represent sdiv-by-one.
+ if (Signed && isPow2Splat(Op.getOperand(1), SplatVal, Negated) &&
+ SplatVal > 1) {
EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
SDValue Op2 = DAG.getTargetConstant(Log2_64(SplatVal), DL, MVT::i32);
@@ -30606,6 +30610,43 @@ AArch64TargetLowering::LowerVECTOR_DEINTERLEAVE(SDValue Op,
assert(OpVT.isScalableVector() &&
"Expected scalable vector in LowerVECTOR_DEINTERLEAVE.");
+ if (Op->getNumOperands() == 3) {
+ // aarch64_sve_ld3 only supports packed datatypes.
+ EVT PackedVT = getPackedSVEVectorVT(OpVT.getVectorElementCount());
+ Align Alignment = DAG.getReducedAlign(PackedVT, /*UseABI=*/false);
+ SDValue StackPtr =
+ DAG.CreateStackTemporary(PackedVT.getStoreSize() * 3, Alignment);
+
+ // Write out unmodified operands.
+ SmallVector<SDValue, 3> Chains;
+ for (unsigned I = 0; I < 3; ++I) {
+ SDValue Ptr =
+ DAG.getMemBasePlusOffset(StackPtr, PackedVT.getStoreSize() * I, DL);
+ SDValue V = getSVESafeBitCast(PackedVT, Op.getOperand(I), DAG);
+ Chains.push_back(
+ DAG.getStore(DAG.getEntryNode(), DL, V, Ptr, MachinePointerInfo()));
+ }
+
+ Intrinsic::ID IntID = Intrinsic::aarch64_sve_ld3_sret;
+ EVT PredVT = PackedVT.changeVectorElementType(MVT::i1);
+
+ SmallVector<SDValue, 7> Ops;
+ Ops.push_back(DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains));
+ Ops.push_back(DAG.getTargetConstant(IntID, DL, MVT::i64));
+ Ops.push_back(DAG.getConstant(1, DL, PredVT));
+ Ops.push_back(StackPtr);
+
+ // Read back and deinterleave data.
+ SDVTList VTs = DAG.getVTList(PackedVT, PackedVT, PackedVT, MVT::Other);
+ SDValue LD3 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops);
+
+ SmallVector<SDValue, 3> Results;
+ Results.push_back(getSVESafeBitCast(OpVT, LD3.getValue(0), DAG));
+ Results.push_back(getSVESafeBitCast(OpVT, LD3.getValue(1), DAG));
+ Results.push_back(getSVESafeBitCast(OpVT, LD3.getValue(2), DAG));
+ return DAG.getMergeValues(Results, DL);
+ }
+
// Are multi-register uzp instructions available?
if (Subtarget->hasSME2() && Subtarget->isStreaming() &&
OpVT.getVectorElementType() != MVT::i1) {
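[Annotation] A conceptual model of the store-then-LD3 trick above (a sketch, not the emitted code): the three operands written back-to-back form one interleaved buffer, and the stride-3 structured read that SVE LD3 performs recovers the deinterleaved lanes.

    // Scalar analogue of 3-way vector_deinterleave via memory (illustrative).
    void deinterleave3(const int *Interleaved, int *R0, int *R1, int *R2,
                       int NumGroups) {
      for (int I = 0; I != NumGroups; ++I) {
        R0[I] = Interleaved[3 * I + 0]; // lands in LD3 result register 0
        R1[I] = Interleaved[3 * I + 1]; // lands in LD3 result register 1
        R2[I] = Interleaved[3 * I + 2]; // lands in LD3 result register 2
      }
    }
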
@@ -30647,6 +30688,42 @@ SDValue AArch64TargetLowering::LowerVECTOR_INTERLEAVE(SDValue Op,
assert(OpVT.isScalableVector() &&
"Expected scalable vector in LowerVECTOR_INTERLEAVE.");
+ if (Op->getNumOperands() == 3) {
+ // aarch64_sve_st3 only supports packed datatypes.
+ EVT PackedVT = getPackedSVEVectorVT(OpVT.getVectorElementCount());
+ SmallVector<SDValue, 3> InVecs;
+ for (SDValue V : Op->ops())
+ InVecs.push_back(getSVESafeBitCast(PackedVT, V, DAG));
+
+ Align Alignment = DAG.getReducedAlign(PackedVT, /*UseABI=*/false);
+ SDValue StackPtr =
+ DAG.CreateStackTemporary(PackedVT.getStoreSize() * 3, Alignment);
+
+ Intrinsic::ID IntID = Intrinsic::aarch64_sve_st3;
+ EVT PredVT = PackedVT.changeVectorElementType(MVT::i1);
+
+ SmallVector<SDValue, 7> Ops;
+ Ops.push_back(DAG.getEntryNode());
+ Ops.push_back(DAG.getTargetConstant(IntID, DL, MVT::i64));
+ Ops.append(InVecs);
+ Ops.push_back(DAG.getConstant(1, DL, PredVT));
+ Ops.push_back(StackPtr);
+
+ // Interleave operands and store.
+ SDValue Chain = DAG.getNode(ISD::INTRINSIC_VOID, DL, MVT::Other, Ops);
+
+ // Read back the interleaved data.
+ SmallVector<SDValue, 3> Results;
+ for (unsigned I = 0; I < 3; ++I) {
+ SDValue Ptr =
+ DAG.getMemBasePlusOffset(StackPtr, PackedVT.getStoreSize() * I, DL);
+ SDValue L = DAG.getLoad(PackedVT, DL, Chain, Ptr, MachinePointerInfo());
+ Results.push_back(getSVESafeBitCast(OpVT, L, DAG));
+ }
+
+ return DAG.getMergeValues(Results, DL);
+ }
+
// Are multi-register zip instructions available?
if (Subtarget->hasSME2() && Subtarget->isStreaming() &&
OpVT.getVectorElementType() != MVT::i1) {
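[Annotation] The interleave path is the mirror image (again a sketch under the same caveat): ST3 scatters lane I of each input into three consecutive slots, and the plain loads that follow read back contiguous thirds of the buffer as the interleaved results.

    // Scalar analogue of 3-way vector_interleave via memory (illustrative).
    void interleave3(const int *A, const int *B, const int *C, int *Out,
                     int NumElts) {
      for (int I = 0; I != NumElts; ++I) {
        Out[3 * I + 0] = A[I]; // ST3 source register 0
        Out[3 * I + 1] = B[I]; // ST3 source register 1
        Out[3 * I + 2] = C[I]; // ST3 source register 2
      }
      // The lowering then loads Out[0..N), Out[N..2N), and Out[2N..3N)
      // back as the three results.
    }
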
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index e472e7d..00956fd 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -300,8 +300,7 @@ public:
bool isDesirableToCommuteXorWithShift(const SDNode *N) const override;
/// Return true if it is profitable to fold a pair of shifts into a mask.
- bool shouldFoldConstantShiftPairToMask(const SDNode *N,
- CombineLevel Level) const override;
+ bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override;
/// Return true if it is profitable to fold a pair of shifts into a mask.
bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 24bef82..8e35ba7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -15,6 +15,7 @@
#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
#include "SIModeRegisterDefaults.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
@@ -27,6 +28,7 @@
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/KnownBits.h"
@@ -106,6 +108,7 @@ public:
bool FlowChanged = false;
mutable Function *SqrtF32 = nullptr;
mutable Function *LdexpF32 = nullptr;
+ mutable SmallVector<WeakVH> DeadVals;
DenseMap<const PHINode *, bool> BreakPhiNodesCache;
@@ -242,6 +245,8 @@ public:
Value *emitSqrtIEEE2ULP(IRBuilder<> &Builder, Value *Src,
FastMathFlags FMF) const;
+ bool tryNarrowMathIfNoOverflow(Instruction *I);
+
public:
bool visitFDiv(BinaryOperator &I);
@@ -281,28 +286,21 @@ bool AMDGPUCodeGenPrepareImpl::run() {
BreakPhiNodesCache.clear();
bool MadeChange = false;
- Function::iterator NextBB;
- for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; FI = NextBB) {
- BasicBlock *BB = &*FI;
- NextBB = std::next(FI);
-
- BasicBlock::iterator Next;
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
- I = Next) {
- Next = std::next(I);
-
- MadeChange |= visit(*I);
-
- if (Next != E) { // Control flow changed
- BasicBlock *NextInstBB = Next->getParent();
- if (NextInstBB != BB) {
- BB = NextInstBB;
- E = BB->end();
- FE = F.end();
- }
- }
+ // Need to use make_early_inc_range because integer division expansion is
+ // handled by Transforms/Utils, and it can delete instructions such as the
+ // terminator of the BB.
+ for (BasicBlock &BB : reverse(F)) {
+ for (Instruction &I : make_early_inc_range(reverse(BB))) {
+ if (!isInstructionTriviallyDead(&I, TLI))
+ MadeChange |= visit(I);
}
}
+
+ while (!DeadVals.empty()) {
+ if (auto *I = dyn_cast_or_null<Instruction>(DeadVals.pop_back_val()))
+ RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
+ }
+
return MadeChange;
}
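[Annotation] The deferred-deletion idiom this hunk adopts, as a standalone sketch (simplified from the patch): visitors queue dead values instead of erasing them mid-iteration, and a WeakVH self-nulls if a queued instruction was already freed by an earlier recursive deletion, so the drain loop never touches a dangling pointer.

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/ValueHandle.h"
    #include "llvm/Transforms/Utils/Local.h"
    using namespace llvm;

    // Hedged sketch of the drain loop at the end of run().
    static void flushDeadVals(SmallVectorImpl<WeakVH> &DeadVals,
                              const TargetLibraryInfo *TLI) {
      while (!DeadVals.empty()) {
        // dyn_cast_or_null skips handles nulled by an earlier deletion.
        if (auto *I = dyn_cast_or_null<Instruction>(DeadVals.pop_back_val()))
          RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
      }
    }
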
@@ -422,7 +420,7 @@ bool AMDGPUCodeGenPrepareImpl::replaceMulWithMul24(BinaryOperator &I) const {
Value *NewVal = insertValues(Builder, Ty, ResultVals);
NewVal->takeName(&I);
I.replaceAllUsesWith(NewVal);
- I.eraseFromParent();
+ DeadVals.push_back(&I);
return true;
}
@@ -496,10 +494,10 @@ bool AMDGPUCodeGenPrepareImpl::foldBinOpIntoSelect(BinaryOperator &BO) const {
FoldedT, FoldedF);
NewSelect->takeName(&BO);
BO.replaceAllUsesWith(NewSelect);
- BO.eraseFromParent();
+ DeadVals.push_back(&BO);
if (CastOp)
- CastOp->eraseFromParent();
- Sel->eraseFromParent();
+ DeadVals.push_back(CastOp);
+ DeadVals.push_back(Sel);
return true;
}
@@ -895,7 +893,7 @@ bool AMDGPUCodeGenPrepareImpl::visitFDiv(BinaryOperator &FDiv) {
if (NewVal) {
FDiv.replaceAllUsesWith(NewVal);
NewVal->takeName(&FDiv);
- RecursivelyDeleteTriviallyDeadInstructions(&FDiv, TLI);
+ DeadVals.push_back(&FDiv);
}
return true;
@@ -1302,10 +1300,7 @@ it will create `s_and_b32 s0, s0, 0xff`.
We accept this change since the non-byte load assumes the upper bits
within the byte are all 0.
*/
-static bool tryNarrowMathIfNoOverflow(Instruction *I,
- const SITargetLowering *TLI,
- const TargetTransformInfo &TTI,
- const DataLayout &DL) {
+bool AMDGPUCodeGenPrepareImpl::tryNarrowMathIfNoOverflow(Instruction *I) {
unsigned Opc = I->getOpcode();
Type *OldType = I->getType();
@@ -1330,6 +1325,7 @@ static bool tryNarrowMathIfNoOverflow(Instruction *I,
NewType = I->getType()->getWithNewBitWidth(NewBit);
// Old cost
+ const TargetTransformInfo &TTI = TM.getTargetTransformInfo(F);
InstructionCost OldCost =
TTI.getArithmeticInstrCost(Opc, OldType, TTI::TCK_RecipThroughput);
// New cost of new op
@@ -1360,7 +1356,7 @@ static bool tryNarrowMathIfNoOverflow(Instruction *I,
Value *Zext = Builder.CreateZExt(Arith, OldType);
I->replaceAllUsesWith(Zext);
- I->eraseFromParent();
+ DeadVals.push_back(I);
return true;
}
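[Annotation] Spelled out, the cost gate looks roughly like the following (the new-cost half sits outside this hunk, so treat the exact calls as an assumption): narrowing proceeds only when the narrow op plus the compensating zext is cheaper than the wide op.

    // Assumed shape of the comparison, using LLVM's TTI cost API.
    InstructionCost OldCost =
        TTI.getArithmeticInstrCost(Opc, OldType, TTI::TCK_RecipThroughput);
    InstructionCost NewCost =
        TTI.getArithmeticInstrCost(Opc, NewType, TTI::TCK_RecipThroughput) +
        TTI.getCastInstrCost(Instruction::ZExt, OldType, NewType,
                             TTI::CastContextHint::None,
                             TTI::TCK_RecipThroughput);
    if (NewCost >= OldCost)
      return false; // keep the wide op
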
@@ -1370,8 +1366,7 @@ bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {
if (UseMul24Intrin && replaceMulWithMul24(I))
return true;
- if (tryNarrowMathIfNoOverflow(&I, ST.getTargetLowering(),
- TM.getTargetTransformInfo(F), DL))
+ if (tryNarrowMathIfNoOverflow(&I))
return true;
bool Changed = false;
@@ -1436,7 +1431,7 @@ bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {
if (NewDiv) {
I.replaceAllUsesWith(NewDiv);
- I.eraseFromParent();
+ DeadVals.push_back(&I);
Changed = true;
}
}
@@ -1492,7 +1487,7 @@ bool AMDGPUCodeGenPrepareImpl::visitLoadInst(LoadInst &I) {
Value *ValTrunc = Builder.CreateTrunc(WidenLoad, IntNTy);
Value *ValOrig = Builder.CreateBitCast(ValTrunc, I.getType());
I.replaceAllUsesWith(ValOrig);
- I.eraseFromParent();
+ DeadVals.push_back(&I);
return true;
}
@@ -1534,7 +1529,7 @@ bool AMDGPUCodeGenPrepareImpl::visitSelectInst(SelectInst &I) {
Fract->takeName(&I);
I.replaceAllUsesWith(Fract);
- RecursivelyDeleteTriviallyDeadInstructions(&I, TLI);
+ DeadVals.push_back(&I);
return true;
}
@@ -1822,7 +1817,7 @@ bool AMDGPUCodeGenPrepareImpl::visitPHINode(PHINode &I) {
}
I.replaceAllUsesWith(Vec);
- I.eraseFromParent();
+ DeadVals.push_back(&I);
return true;
}
@@ -1903,7 +1898,7 @@ bool AMDGPUCodeGenPrepareImpl::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
auto *Intrin = B.CreateIntrinsic(
I.getType(), Intrinsic::amdgcn_addrspacecast_nonnull, {I.getOperand(0)});
I.replaceAllUsesWith(Intrin);
- I.eraseFromParent();
+ DeadVals.push_back(&I);
return true;
}
@@ -2000,16 +1995,10 @@ bool AMDGPUCodeGenPrepareImpl::visitFMinLike(IntrinsicInst &I) {
Value *Fract = applyFractPat(Builder, FractArg);
Fract->takeName(&I);
I.replaceAllUsesWith(Fract);
-
- RecursivelyDeleteTriviallyDeadInstructions(&I, TLI);
+ DeadVals.push_back(&I);
return true;
}
-static bool isOneOrNegOne(const Value *Val) {
- const APFloat *C;
- return match(Val, m_APFloat(C)) && C->getExactLog2Abs() == 0;
-}
-
// Expand llvm.sqrt.f32 calls with !fpmath metadata in a semi-fast way.
bool AMDGPUCodeGenPrepareImpl::visitSqrt(IntrinsicInst &Sqrt) {
Type *Ty = Sqrt.getType()->getScalarType();
@@ -2030,18 +2019,6 @@ bool AMDGPUCodeGenPrepareImpl::visitSqrt(IntrinsicInst &Sqrt) {
if (ReqdAccuracy < 1.0f)
return false;
- // FIXME: This is an ugly hack for this pass using forward iteration instead
- // of reverse. If it worked like a normal combiner, the rsq would form before
- // we saw a sqrt call.
- auto *FDiv =
- dyn_cast_or_null<FPMathOperator>(Sqrt.getUniqueUndroppableUser());
- if (FDiv && FDiv->getOpcode() == Instruction::FDiv &&
- FDiv->getFPAccuracy() >= 1.0f &&
- canOptimizeWithRsq(FPOp, FDiv->getFastMathFlags(), SqrtFMF) &&
- // TODO: We should also handle the arcp case for the fdiv with non-1 value
- isOneOrNegOne(FDiv->getOperand(0)))
- return false;
-
Value *SrcVal = Sqrt.getOperand(0);
bool CanTreatAsDAZ = canIgnoreDenormalInput(SrcVal, &Sqrt);
@@ -2065,7 +2042,7 @@ bool AMDGPUCodeGenPrepareImpl::visitSqrt(IntrinsicInst &Sqrt) {
Value *NewSqrt = insertValues(Builder, Sqrt.getType(), ResultVals);
NewSqrt->takeName(&Sqrt);
Sqrt.replaceAllUsesWith(NewSqrt);
- Sqrt.eraseFromParent();
+ DeadVals.push_back(&Sqrt);
return true;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
index 73b2660..5407566 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
@@ -468,6 +468,38 @@ void RegBankLegalizeHelper::lowerUnpackBitShift(MachineInstr &MI) {
MI.eraseFromParent();
}
+void RegBankLegalizeHelper::lowerUnpackMinMax(MachineInstr &MI) {
+ Register Lo, Hi;
+ switch (MI.getOpcode()) {
+ case AMDGPU::G_SMIN:
+ case AMDGPU::G_SMAX: {
+ // For signed operations, use sign extension
+ auto [Val0_Lo, Val0_Hi] = unpackSExt(MI.getOperand(1).getReg());
+ auto [Val1_Lo, Val1_Hi] = unpackSExt(MI.getOperand(2).getReg());
+ Lo = B.buildInstr(MI.getOpcode(), {SgprRB_S32}, {Val0_Lo, Val1_Lo})
+ .getReg(0);
+ Hi = B.buildInstr(MI.getOpcode(), {SgprRB_S32}, {Val0_Hi, Val1_Hi})
+ .getReg(0);
+ break;
+ }
+ case AMDGPU::G_UMIN:
+ case AMDGPU::G_UMAX: {
+ // For unsigned operations, use zero extension
+ auto [Val0_Lo, Val0_Hi] = unpackZExt(MI.getOperand(1).getReg());
+ auto [Val1_Lo, Val1_Hi] = unpackZExt(MI.getOperand(2).getReg());
+ Lo = B.buildInstr(MI.getOpcode(), {SgprRB_S32}, {Val0_Lo, Val1_Lo})
+ .getReg(0);
+ Hi = B.buildInstr(MI.getOpcode(), {SgprRB_S32}, {Val0_Hi, Val1_Hi})
+ .getReg(0);
+ break;
+ }
+ default:
+ llvm_unreachable("Unpack min/max lowering not implemented");
+ }
+ B.buildBuildVectorTrunc(MI.getOperand(0).getReg(), {Lo, Hi});
+ MI.eraseFromParent();
+}
+
static bool isSignedBFE(MachineInstr &MI) {
if (GIntrinsic *GI = dyn_cast<GIntrinsic>(&MI))
return (GI->is(Intrinsic::amdgcn_sbfe));
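[Annotation] A conceptual scalar model of lowerUnpackMinMax above (illustration only, not the emitted MIR): uniform v2s16 min/max has no 16-bit SALU form, so each lane is widened to 32 bits (sign- or zero-extended per the switch), operated on, then repacked by the truncating build_vector.

    #include <cstdint>

    // Hypothetical scalar sketch of the G_SMAX case for a uniform v2s16.
    static void smaxV2S16(const int16_t In0[2], const int16_t In1[2],
                          int16_t Out[2]) {
      for (int I = 0; I != 2; ++I) {
        int32_t A = In0[I], B = In1[I];  // unpackSExt: lane -> s32
        Out[I] = int16_t(A > B ? A : B); // 32-bit smax, then trunc/repack
      }
    }
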
@@ -654,6 +686,8 @@ void RegBankLegalizeHelper::lower(MachineInstr &MI,
}
case UnpackBitShift:
return lowerUnpackBitShift(MI);
+ case UnpackMinMax:
+ return lowerUnpackMinMax(MI);
case Ext32To64: {
const RegisterBank *RB = MRI.getRegBank(MI.getOperand(0).getReg());
MachineInstrBuilder Hi;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h
index 7affe5a..d937815 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h
@@ -123,6 +123,7 @@ private:
void lowerSplitTo32(MachineInstr &MI);
void lowerSplitTo32Select(MachineInstr &MI);
void lowerSplitTo32SExtInReg(MachineInstr &MI);
+ void lowerUnpackMinMax(MachineInstr &MI);
};
} // end namespace AMDGPU
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
index f413bbc..bfe2c80 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
@@ -522,6 +522,22 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST,
.Uni(S64, {{Sgpr64}, {Sgpr64, Sgpr32, Sgpr32}, S_BFE})
.Div(S64, {{Vgpr64}, {Vgpr64, Vgpr32, Vgpr32}, V_BFE});
+ addRulesForGOpcs({G_SMIN, G_SMAX}, Standard)
+ .Uni(S16, {{Sgpr32Trunc}, {Sgpr32SExt, Sgpr32SExt}})
+ .Div(S16, {{Vgpr16}, {Vgpr16, Vgpr16}})
+ .Uni(S32, {{Sgpr32}, {Sgpr32, Sgpr32}})
+ .Div(S32, {{Vgpr32}, {Vgpr32, Vgpr32}})
+ .Uni(V2S16, {{SgprV2S16}, {SgprV2S16, SgprV2S16}, UnpackMinMax})
+ .Div(V2S16, {{VgprV2S16}, {VgprV2S16, VgprV2S16}});
+
+ addRulesForGOpcs({G_UMIN, G_UMAX}, Standard)
+ .Uni(S16, {{Sgpr32Trunc}, {Sgpr32ZExt, Sgpr32ZExt}})
+ .Div(S16, {{Vgpr16}, {Vgpr16, Vgpr16}})
+ .Uni(S32, {{Sgpr32}, {Sgpr32, Sgpr32}})
+ .Div(S32, {{Vgpr32}, {Vgpr32, Vgpr32}})
+ .Uni(V2S16, {{SgprV2S16}, {SgprV2S16, SgprV2S16}, UnpackMinMax})
+ .Div(V2S16, {{VgprV2S16}, {VgprV2S16, VgprV2S16}});
+
// Note: we only write S1 rules for G_IMPLICIT_DEF, G_CONSTANT, G_FCONSTANT
// and G_FREEZE here, rest is trivially regbankselected earlier
addRulesForGOpcs({G_IMPLICIT_DEF}).Any({{UniS1}, {{Sgpr32Trunc}, {}}});
@@ -617,6 +633,12 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST,
.Any({{UniS64, S64}, {{Sgpr64}, {Sgpr64}}})
.Any({{DivS64, S64}, {{Vgpr64}, {Vgpr64}, SplitTo32SExtInReg}});
+ addRulesForGOpcs({G_ASSERT_ZEXT, G_ASSERT_SEXT}, Standard)
+ .Uni(S32, {{Sgpr32}, {Sgpr32, Imm}})
+ .Div(S32, {{Vgpr32}, {Vgpr32, Imm}})
+ .Uni(S64, {{Sgpr64}, {Sgpr64, Imm}})
+ .Div(S64, {{Vgpr64}, {Vgpr64, Imm}});
+
bool hasSMRDx3 = ST->hasScalarDwordx3Loads();
bool hasSMRDSmall = ST->hasScalarSubwordLoads();
bool usesTrue16 = ST->useRealTrue16Insts();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h
index d0c6910..93e0efd 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h
@@ -212,6 +212,7 @@ enum LoweringMethodID {
VccExtToSel,
UniExtToSel,
UnpackBitShift,
+ UnpackMinMax,
S_BFE,
V_BFE,
VgprToVccCopy,
diff --git a/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp b/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
index ef63acc..71494be 100644
--- a/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
@@ -905,7 +905,7 @@ bool GCNRegPressurePrinter::runOnMachineFunction(MachineFunction &MF) {
OS << ":\n";
SlotIndex MBBStartSlot = LIS.getSlotIndexes()->getMBBStartIdx(&MBB);
- SlotIndex MBBEndSlot = LIS.getSlotIndexes()->getMBBEndIdx(&MBB);
+ SlotIndex MBBLastSlot = LIS.getSlotIndexes()->getMBBLastIdx(&MBB);
GCNRPTracker::LiveRegSet LiveIn, LiveOut;
GCNRegPressure RPAtMBBEnd;
@@ -931,7 +931,7 @@ bool GCNRegPressurePrinter::runOnMachineFunction(MachineFunction &MF) {
}
} else {
GCNUpwardRPTracker RPT(LIS);
- RPT.reset(MRI, MBBEndSlot);
+ RPT.reset(MRI, MBBLastSlot);
LiveOut = RPT.getLiveRegs();
RPAtMBBEnd = RPT.getPressure();
@@ -966,14 +966,14 @@ bool GCNRegPressurePrinter::runOnMachineFunction(MachineFunction &MF) {
OS << PFX " Live-out:" << llvm::print(LiveOut, MRI);
if (UseDownwardTracker)
- ReportLISMismatchIfAny(LiveOut, getLiveRegs(MBBEndSlot, LIS, MRI));
+ ReportLISMismatchIfAny(LiveOut, getLiveRegs(MBBLastSlot, LIS, MRI));
GCNRPTracker::LiveRegSet LiveThrough;
for (auto [Reg, Mask] : LiveIn) {
LaneBitmask MaskIntersection = Mask & LiveOut.lookup(Reg);
if (MaskIntersection.any()) {
LaneBitmask LTMask = getRegLiveThroughMask(
- MRI, LIS, Reg, MBBStartSlot, MBBEndSlot, MaskIntersection);
+ MRI, LIS, Reg, MBBStartSlot, MBBLastSlot, MaskIntersection);
if (LTMask.any())
LiveThrough[Reg] = LTMask;
}
diff --git a/llvm/lib/Target/AMDGPU/GCNRegPressure.h b/llvm/lib/Target/AMDGPU/GCNRegPressure.h
index a9c58bb..898d1ff 100644
--- a/llvm/lib/Target/AMDGPU/GCNRegPressure.h
+++ b/llvm/lib/Target/AMDGPU/GCNRegPressure.h
@@ -313,8 +313,8 @@ public:
/// reset tracker to the end of the \p MBB.
void reset(const MachineBasicBlock &MBB) {
- reset(MBB.getParent()->getRegInfo(),
- LIS.getSlotIndexes()->getMBBEndIdx(&MBB));
+ SlotIndex MBBLastSlot = LIS.getSlotIndexes()->getMBBLastIdx(&MBB);
+ reset(MBB.getParent()->getRegInfo(), MBBLastSlot);
}
/// reset tracker to the point just after \p MI (in program order).
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 730be69..80e985d 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -103,52 +103,52 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
addRegisterClass(MVT::Untyped, V64RegClass);
addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass);
- addRegisterClass(MVT::v3f32, TRI->getVGPRClassForBitWidth(96));
+ addRegisterClass(MVT::v3f32, &AMDGPU::VReg_96RegClass);
addRegisterClass(MVT::v2i64, &AMDGPU::SGPR_128RegClass);
addRegisterClass(MVT::v2f64, &AMDGPU::SGPR_128RegClass);
addRegisterClass(MVT::v4i32, &AMDGPU::SGPR_128RegClass);
- addRegisterClass(MVT::v4f32, TRI->getVGPRClassForBitWidth(128));
+ addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
- addRegisterClass(MVT::v5f32, TRI->getVGPRClassForBitWidth(160));
+ addRegisterClass(MVT::v5f32, &AMDGPU::VReg_160RegClass);
addRegisterClass(MVT::v6i32, &AMDGPU::SGPR_192RegClass);
- addRegisterClass(MVT::v6f32, TRI->getVGPRClassForBitWidth(192));
+ addRegisterClass(MVT::v6f32, &AMDGPU::VReg_192RegClass);
addRegisterClass(MVT::v3i64, &AMDGPU::SGPR_192RegClass);
- addRegisterClass(MVT::v3f64, TRI->getVGPRClassForBitWidth(192));
+ addRegisterClass(MVT::v3f64, &AMDGPU::VReg_192RegClass);
addRegisterClass(MVT::v7i32, &AMDGPU::SGPR_224RegClass);
- addRegisterClass(MVT::v7f32, TRI->getVGPRClassForBitWidth(224));
+ addRegisterClass(MVT::v7f32, &AMDGPU::VReg_224RegClass);
addRegisterClass(MVT::v8i32, &AMDGPU::SGPR_256RegClass);
- addRegisterClass(MVT::v8f32, TRI->getVGPRClassForBitWidth(256));
+ addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);
addRegisterClass(MVT::v4i64, &AMDGPU::SGPR_256RegClass);
- addRegisterClass(MVT::v4f64, TRI->getVGPRClassForBitWidth(256));
+ addRegisterClass(MVT::v4f64, &AMDGPU::VReg_256RegClass);
addRegisterClass(MVT::v9i32, &AMDGPU::SGPR_288RegClass);
- addRegisterClass(MVT::v9f32, TRI->getVGPRClassForBitWidth(288));
+ addRegisterClass(MVT::v9f32, &AMDGPU::VReg_288RegClass);
addRegisterClass(MVT::v10i32, &AMDGPU::SGPR_320RegClass);
- addRegisterClass(MVT::v10f32, TRI->getVGPRClassForBitWidth(320));
+ addRegisterClass(MVT::v10f32, &AMDGPU::VReg_320RegClass);
addRegisterClass(MVT::v11i32, &AMDGPU::SGPR_352RegClass);
- addRegisterClass(MVT::v11f32, TRI->getVGPRClassForBitWidth(352));
+ addRegisterClass(MVT::v11f32, &AMDGPU::VReg_352RegClass);
addRegisterClass(MVT::v12i32, &AMDGPU::SGPR_384RegClass);
- addRegisterClass(MVT::v12f32, TRI->getVGPRClassForBitWidth(384));
+ addRegisterClass(MVT::v12f32, &AMDGPU::VReg_384RegClass);
addRegisterClass(MVT::v16i32, &AMDGPU::SGPR_512RegClass);
- addRegisterClass(MVT::v16f32, TRI->getVGPRClassForBitWidth(512));
+ addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);
addRegisterClass(MVT::v8i64, &AMDGPU::SGPR_512RegClass);
- addRegisterClass(MVT::v8f64, TRI->getVGPRClassForBitWidth(512));
+ addRegisterClass(MVT::v8f64, &AMDGPU::VReg_512RegClass);
addRegisterClass(MVT::v16i64, &AMDGPU::SGPR_1024RegClass);
- addRegisterClass(MVT::v16f64, TRI->getVGPRClassForBitWidth(1024));
+ addRegisterClass(MVT::v16f64, &AMDGPU::VReg_1024RegClass);
if (Subtarget->has16BitInsts()) {
if (Subtarget->useRealTrue16Insts()) {
@@ -180,7 +180,7 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
}
addRegisterClass(MVT::v32i32, &AMDGPU::VReg_1024RegClass);
- addRegisterClass(MVT::v32f32, TRI->getVGPRClassForBitWidth(1024));
+ addRegisterClass(MVT::v32f32, &AMDGPU::VReg_1024RegClass);
computeRegisterProperties(Subtarget->getRegisterInfo());
diff --git a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
index 4d3331a..c684f9e 100644
--- a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
@@ -674,15 +674,9 @@ void SIPreEmitPeephole::performF32Unpacking(MachineInstr &I) {
createUnpackedMI(I, UnpackedOpcode, /*IsHiBits=*/true);
MachineOperand HiDstOp = Op0HOp1H->getOperand(0);
- if (I.getFlag(MachineInstr::MIFlag::NoFPExcept)) {
- Op0LOp1L->setFlag(MachineInstr::MIFlag::NoFPExcept);
- Op0HOp1H->setFlag(MachineInstr::MIFlag::NoFPExcept);
- }
- if (I.getFlag(MachineInstr::MIFlag::FmContract)) {
- Op0LOp1L->setFlag(MachineInstr::MIFlag::FmContract);
- Op0HOp1H->setFlag(MachineInstr::MIFlag::FmContract);
- }
-
+ uint32_t IFlags = I.getFlags();
+ Op0LOp1L->setFlags(IFlags);
+ Op0HOp1H->setFlags(IFlags);
LoDstOp.setIsRenamable(DstOp.isRenamable());
HiDstOp.setIsRenamable(DstOp.isRenamable());
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 2a40fb9..67ea2dd 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -42,7 +42,6 @@
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ComplexDeinterleavingPass.h"
#include "llvm/CodeGen/ISDOpcodes.h"
-#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -13817,7 +13816,7 @@ bool ARMTargetLowering::isDesirableToCommuteXorWithShift(
}
bool ARMTargetLowering::shouldFoldConstantShiftPairToMask(
- const SDNode *N, CombineLevel Level) const {
+ const SDNode *N) const {
assert(((N->getOpcode() == ISD::SHL &&
N->getOperand(0).getOpcode() == ISD::SRL) ||
(N->getOpcode() == ISD::SRL &&
@@ -13827,7 +13826,8 @@ bool ARMTargetLowering::shouldFoldConstantShiftPairToMask(
if (!Subtarget->isThumb1Only())
return true;
- if (Level == BeforeLegalizeTypes)
+ EVT VT = N->getValueType(0);
+ if (VT.getScalarSizeInBits() > 32)
return true;
return false;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 26ff54c..70aa001 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -772,8 +772,7 @@ class VectorType;
bool isDesirableToCommuteXorWithShift(const SDNode *N) const override;
- bool shouldFoldConstantShiftPairToMask(const SDNode *N,
- CombineLevel Level) const override;
+ bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override;
/// Return true if it is profitable to fold a pair of shifts into a mask.
bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override {
diff --git a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index ba70c9e..97379d7 100644
--- a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -3677,7 +3677,7 @@ bool MipsAsmParser::expandBranchImm(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out,
Out, STI))
return true;
- if (IsLikely) {
+ if (IsLikely && MemOffsetOp.isExpr()) {
TOut.emitRRX(OpCode, DstRegOp.getReg(), ATReg,
MCOperand::createExpr(MemOffsetOp.getExpr()), IDLoc, STI);
TOut.emitRRI(Mips::SLL, Mips::ZERO, Mips::ZERO, 0, IDLoc, STI);
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index b05de49..7f1ff45 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -1306,7 +1306,7 @@ bool MipsTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
}
bool MipsTargetLowering::shouldFoldConstantShiftPairToMask(
- const SDNode *N, CombineLevel Level) const {
+ const SDNode *N) const {
assert(((N->getOpcode() == ISD::SHL &&
N->getOperand(0).getOpcode() == ISD::SRL) ||
(N->getOpcode() == ISD::SRL &&
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.h b/llvm/lib/Target/Mips/MipsISelLowering.h
index c65c76c..25a0bf9 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.h
+++ b/llvm/lib/Target/Mips/MipsISelLowering.h
@@ -290,8 +290,7 @@ class TargetRegisterClass;
bool isCheapToSpeculateCttz(Type *Ty) const override;
bool isCheapToSpeculateCtlz(Type *Ty) const override;
bool hasBitTest(SDValue X, SDValue Y) const override;
- bool shouldFoldConstantShiftPairToMask(const SDNode *N,
- CombineLevel Level) const override;
+ bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override;
/// Return the register type for a given MVT, ensuring vectors are treated
/// as a series of gpr sized integers.
diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.td b/llvm/lib/Target/Mips/MipsInstrInfo.td
index eff80e5..21d8ded 100644
--- a/llvm/lib/Target/Mips/MipsInstrInfo.td
+++ b/llvm/lib/Target/Mips/MipsInstrInfo.td
@@ -855,6 +855,16 @@ def calltarget : Operand<iPTR> {
def imm64: Operand<i64>;
+def ConstantImmAsmOperandClass : AsmOperandClass {
+ let Name = "ConstantImm";
+ let PredicateMethod = "isConstantImm";
+ let RenderMethod = "addImmOperands";
+}
+
+def ConstantImm64: Operand<i64> {
+ let ParserMatchClass = ConstantImmAsmOperandClass;
+}
+
def simm19_lsl2 : Operand<i32> {
let EncoderMethod = "getSimm19Lsl2Encoding";
let DecoderMethod = "DecodeSimm19Lsl2";
@@ -2947,10 +2957,10 @@ def : MipsInstAlias<"nor\t$rs, $imm", (NORImm GPR32Opnd:$rs, GPR32Opnd:$rs,
let hasDelaySlot = 1, isCTI = 1 in {
def BneImm : MipsAsmPseudoInst<(outs GPR32Opnd:$rt),
- (ins imm64:$imm64, brtarget:$offset),
+ (ins ConstantImm64:$imm64, brtarget:$offset),
"bne\t$rt, $imm64, $offset">;
def BeqImm : MipsAsmPseudoInst<(outs GPR32Opnd:$rt),
- (ins imm64:$imm64, brtarget:$offset),
+ (ins ConstantImm64:$imm64, brtarget:$offset),
"beq\t$rt, $imm64, $offset">;
class CondBranchPseudo<string instr_asm> :
@@ -2978,7 +2988,7 @@ def BGTUL: CondBranchPseudo<"bgtul">, ISA_MIPS2_NOT_32R6_64R6;
let isCTI = 1 in
class CondBranchImmPseudo<string instr_asm> :
- MipsAsmPseudoInst<(outs), (ins GPR32Opnd:$rs, imm64:$imm, brtarget:$offset),
+ MipsAsmPseudoInst<(outs), (ins GPR32Opnd:$rs, ConstantImm64:$imm, brtarget:$offset),
!strconcat(instr_asm, "\t$rs, $imm, $offset")>;
def BEQLImmMacro : CondBranchImmPseudo<"beql">, ISA_MIPS2_NOT_32R6_64R6;
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index f692180..944a1e2 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -585,6 +585,10 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
// We cannot sextinreg(i1). Expand to shifts.
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+ // Custom lowering for ISD::UCMP (unsigned 3-way compare).
+ setOperationAction(ISD::UCMP, MVT::i32, Custom);
+ setOperationAction(ISD::UCMP, MVT::i64, isPPC64 ? Custom : Expand);
+
// NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
// SjLj exception handling but a light-weight setjmp/longjmp replacement to
// support continuation, user-level threading, and etc.. As a result, no
@@ -12618,6 +12622,33 @@ SDValue PPCTargetLowering::LowerSSUBO(SDValue Op, SelectionDAG &DAG) const {
return DAG.getMergeValues({Sub, OverflowTrunc}, dl);
}
+// Lower unsigned 3-way compare producing -1/0/1.
+SDValue PPCTargetLowering::LowerUCMP(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ SDValue A = DAG.getFreeze(Op.getOperand(0));
+ SDValue B = DAG.getFreeze(Op.getOperand(1));
+ EVT OpVT = A.getValueType(); // operand type
+ EVT ResVT = Op.getValueType(); // result type
+
+ // First compute diff = A - B (will become subf).
+ SDValue Diff = DAG.getNode(ISD::SUB, DL, OpVT, A, B);
+
+ // Generate B - A using SUBC to capture carry.
+ SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
+ SDValue SubC = DAG.getNode(PPCISD::SUBC, DL, VTs, B, A);
+ SDValue CA0 = SubC.getValue(1);
+
+ // t2 = A - B + CA0 using SUBE.
+ SDValue SubE1 = DAG.getNode(PPCISD::SUBE, DL, VTs, A, B, CA0);
+ SDValue CA1 = SubE1.getValue(1);
+
+ // res = diff - t2 + CA1 using SUBE (produces desired -1/0/1).
+ SDValue ResPair = DAG.getNode(PPCISD::SUBE, DL, VTs, Diff, SubE1, CA1);
+
+ // Extract the first result and truncate to result type if needed
+ return DAG.getSExtOrTrunc(ResPair.getValue(0), DL, ResVT);
+}
+
/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
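[Annotation] To see that the carry chain in LowerUCMP above really yields -1/0/1, trace it using PPC's convention that CA is 1 exactly when a subtraction does not borrow (my hedged reading of the SUBC/SUBE nodes), alongside a reference implementation:

    #include <cstdint>

    // Carry-chain trace (all arithmetic unsigned and modular):
    //   Diff  = A - B
    //   CA0   = carry(B - A)            = (A <= B)
    //   SubE1 = A - B - (1 - CA0)       = Diff - (A > B)
    //   CA1   = carry(SubE1)            = (A >= B)
    //   Res   = Diff - SubE1 - (1-CA1)  = (A > B) - (A < B)  // -1, 0, +1
    static int ucmpReference(uint32_t A, uint32_t B) {
      return (A > B) - (A < B); // what the SUBC/SUBE sequence computes
    }
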
@@ -12722,6 +12753,8 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::UADDO_CARRY:
case ISD::USUBO_CARRY:
return LowerADDSUBO_CARRY(Op, DAG);
+ case ISD::UCMP:
+ return LowerUCMP(Op, DAG);
}
}
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 6694305..59f3387 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -1318,6 +1318,7 @@ namespace llvm {
SDValue LowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerADDSUBO(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerUCMP(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerToLibCall(const char *LibCallName, SDValue Op,
SelectionDAG &DAG) const;
SDValue lowerLibCallBasedOnType(const char *LibCallFloatName,
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 333b693..5ceb477 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -1520,6 +1520,8 @@ def HasVendorXqcics
: Predicate<"Subtarget->hasVendorXqcics()">,
AssemblerPredicate<(all_of FeatureVendorXqcics),
"'Xqcics' (Qualcomm uC Conditional Select Extension)">;
+def NoVendorXqcics
+ : Predicate<"!Subtarget->hasVendorXqcics()">;
def FeatureVendorXqcicsr
: RISCVExperimentalExtension<0, 4, "Qualcomm uC CSR Extension">;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 7a14929..b9e01c3 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1653,17 +1653,18 @@ def riscv_selectcc_frag : PatFrag<(ops node:$lhs, node:$rhs, node:$cc,
node:$falsev), [{}],
IntCCtoRISCVCC>;
-multiclass SelectCC_GPR_rrirr<DAGOperand valty, ValueType vt> {
+multiclass SelectCC_GPR_rrirr<DAGOperand valty, ValueType vt,
+ ValueType cmpvt = XLenVT> {
let usesCustomInserter = 1 in
def _Using_CC_GPR : Pseudo<(outs valty:$dst),
(ins GPR:$lhs, GPR:$rhs, cond_code:$cc,
valty:$truev, valty:$falsev),
[(set valty:$dst,
- (riscv_selectcc_frag:$cc (XLenVT GPR:$lhs), GPR:$rhs, cond,
+ (riscv_selectcc_frag:$cc (cmpvt GPR:$lhs), GPR:$rhs, cond,
(vt valty:$truev), valty:$falsev))]>;
// Explicitly select 0 in the condition to X0. The register coalescer doesn't
// always do it.
- def : Pat<(riscv_selectcc_frag:$cc (XLenVT GPR:$lhs), 0, cond, (vt valty:$truev),
+ def : Pat<(riscv_selectcc_frag:$cc (cmpvt GPR:$lhs), 0, cond, (vt valty:$truev),
valty:$falsev),
(!cast<Instruction>(NAME#"_Using_CC_GPR") GPR:$lhs, (XLenVT X0),
(IntCCtoRISCVCC $cc), valty:$truev, valty:$falsev)>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
index b9510ef..65e7e3b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -59,9 +59,9 @@ def FPR64IN32X : RegisterOperand<GPRPair> {
def DExt : ExtInfo<"", "", [HasStdExtD], f64, FPR64, FPR32, FPR64, ?>;
def ZdinxExt : ExtInfo<"_INX", "Zfinx", [HasStdExtZdinx, IsRV64],
- f64, FPR64INX, FPR32INX, FPR64INX, ?>;
+ f64, FPR64INX, FPR32INX, FPR64INX, ?, i64>;
def Zdinx32Ext : ExtInfo<"_IN32X", "ZdinxRV32Only", [HasStdExtZdinx, IsRV32],
- f64, FPR64IN32X, FPR32INX, FPR64IN32X, ?>;
+ f64, FPR64IN32X, FPR32INX, FPR64IN32X, ?, i32>;
defvar DExts = [DExt, ZdinxExt, Zdinx32Ext];
defvar DExtsRV64 = [DExt, ZdinxExt];
@@ -261,8 +261,10 @@ let Predicates = [HasStdExtZdinx, IsRV32] in {
/// Float conversion operations
// f64 -> f32, f32 -> f64
-def : Pat<(any_fpround FPR64IN32X:$rs1), (FCVT_S_D_IN32X FPR64IN32X:$rs1, FRM_DYN)>;
-def : Pat<(any_fpextend FPR32INX:$rs1), (FCVT_D_S_IN32X FPR32INX:$rs1, FRM_RNE)>;
+def : Pat<(any_fpround FPR64IN32X:$rs1),
+ (FCVT_S_D_IN32X FPR64IN32X:$rs1, (i32 FRM_DYN))>;
+def : Pat<(any_fpextend FPR32INX:$rs1),
+ (FCVT_D_S_IN32X FPR32INX:$rs1, (i32 FRM_RNE))>;
} // Predicates = [HasStdExtZdinx, IsRV32]
// [u]int<->double conversion patterns must be gated on IsRV32 or IsRV64, so
@@ -321,7 +323,7 @@ def : Pat<(any_fsqrt FPR64INX:$rs1), (FSQRT_D_INX FPR64INX:$rs1, FRM_DYN)>;
def : Pat<(fneg FPR64INX:$rs1), (FSGNJN_D_INX $rs1, $rs1)>;
def : Pat<(fabs FPR64INX:$rs1), (FSGNJX_D_INX $rs1, $rs1)>;
-def : Pat<(riscv_fclass FPR64INX:$rs1), (FCLASS_D_INX $rs1)>;
+def : Pat<(i64 (riscv_fclass FPR64INX:$rs1)), (FCLASS_D_INX $rs1)>;
def : PatFprFpr<fcopysign, FSGNJ_D_INX, FPR64INX, f64>;
def : PatFprFpr<riscv_fsgnjx, FSGNJX_D_INX, FPR64INX, f64>;
@@ -354,41 +356,46 @@ def : Pat<(fneg (any_fma_nsz FPR64INX:$rs1, FPR64INX:$rs2, FPR64INX:$rs3)),
} // Predicates = [HasStdExtZdinx, IsRV64]
let Predicates = [HasStdExtZdinx, IsRV32] in {
-def : Pat<(any_fsqrt FPR64IN32X:$rs1), (FSQRT_D_IN32X FPR64IN32X:$rs1, FRM_DYN)>;
+def : Pat<(any_fsqrt FPR64IN32X:$rs1),
+ (FSQRT_D_IN32X FPR64IN32X:$rs1, (i32 FRM_DYN))>;
def : Pat<(fneg FPR64IN32X:$rs1), (FSGNJN_D_IN32X $rs1, $rs1)>;
def : Pat<(fabs FPR64IN32X:$rs1), (FSGNJX_D_IN32X $rs1, $rs1)>;
-def : Pat<(riscv_fclass FPR64IN32X:$rs1), (FCLASS_D_IN32X $rs1)>;
+def : Pat<(i32 (riscv_fclass FPR64IN32X:$rs1)), (FCLASS_D_IN32X $rs1)>;
def : PatFprFpr<fcopysign, FSGNJ_D_IN32X, FPR64IN32X, f64>;
def : PatFprFpr<riscv_fsgnjx, FSGNJX_D_IN32X, FPR64IN32X, f64>;
def : Pat<(fcopysign FPR64IN32X:$rs1, (fneg FPR64IN32X:$rs2)),
(FSGNJN_D_IN32X FPR64IN32X:$rs1, FPR64IN32X:$rs2)>;
def : Pat<(fcopysign FPR64IN32X:$rs1, FPR32INX:$rs2),
- (FSGNJ_D_IN32X $rs1, (FCVT_D_S_IN32X $rs2, FRM_RNE))>;
+ (FSGNJ_D_IN32X $rs1, (FCVT_D_S_IN32X $rs2, (i32 FRM_RNE)))>;
def : Pat<(fcopysign FPR32INX:$rs1, FPR64IN32X:$rs2),
- (FSGNJ_S_INX $rs1, (FCVT_S_D_IN32X $rs2, FRM_DYN))>;
+ (FSGNJ_S_INX $rs1, (FCVT_S_D_IN32X $rs2, (i32 FRM_DYN)))>;
// fmadd: rs1 * rs2 + rs3
def : Pat<(any_fma FPR64IN32X:$rs1, FPR64IN32X:$rs2, FPR64IN32X:$rs3),
- (FMADD_D_IN32X $rs1, $rs2, $rs3, FRM_DYN)>;
+ (FMADD_D_IN32X $rs1, $rs2, $rs3, (i32 FRM_DYN))>;
// fmsub: rs1 * rs2 - rs3
def : Pat<(any_fma FPR64IN32X:$rs1, FPR64IN32X:$rs2, (fneg FPR64IN32X:$rs3)),
- (FMSUB_D_IN32X FPR64IN32X:$rs1, FPR64IN32X:$rs2, FPR64IN32X:$rs3, FRM_DYN)>;
+ (FMSUB_D_IN32X FPR64IN32X:$rs1, FPR64IN32X:$rs2, FPR64IN32X:$rs3,
+ (i32 FRM_DYN))>;
// fnmsub: -rs1 * rs2 + rs3
def : Pat<(any_fma (fneg FPR64IN32X:$rs1), FPR64IN32X:$rs2, FPR64IN32X:$rs3),
- (FNMSUB_D_IN32X FPR64IN32X:$rs1, FPR64IN32X:$rs2, FPR64IN32X:$rs3, FRM_DYN)>;
+ (FNMSUB_D_IN32X FPR64IN32X:$rs1, FPR64IN32X:$rs2, FPR64IN32X:$rs3,
+ (i32 FRM_DYN))>;
// fnmadd: -rs1 * rs2 - rs3
def : Pat<(any_fma (fneg FPR64IN32X:$rs1), FPR64IN32X:$rs2, (fneg FPR64IN32X:$rs3)),
- (FNMADD_D_IN32X FPR64IN32X:$rs1, FPR64IN32X:$rs2, FPR64IN32X:$rs3, FRM_DYN)>;
+ (FNMADD_D_IN32X FPR64IN32X:$rs1, FPR64IN32X:$rs2, FPR64IN32X:$rs3,
+ (i32 FRM_DYN))>;
// fnmadd: -(rs1 * rs2 + rs3) (the nsz flag on the FMA)
def : Pat<(fneg (any_fma_nsz FPR64IN32X:$rs1, FPR64IN32X:$rs2, FPR64IN32X:$rs3)),
- (FNMADD_D_IN32X FPR64IN32X:$rs1, FPR64IN32X:$rs2, FPR64IN32X:$rs3, FRM_DYN)>;
+ (FNMADD_D_IN32X FPR64IN32X:$rs1, FPR64IN32X:$rs2, FPR64IN32X:$rs3,
+ (i32 FRM_DYN))>;
} // Predicates = [HasStdExtZdinx, IsRV32]
// The ratified 20191213 ISA spec defines fmin and fmax in a way that matches
@@ -441,42 +448,42 @@ def : PatSetCC<FPR64, any_fsetccs, SETOLE, FLE_D, f64>;
let Predicates = [HasStdExtZdinx, IsRV64] in {
// Match signaling FEQ_D
-def : Pat<(XLenVT (strict_fsetccs (f64 FPR64INX:$rs1), FPR64INX:$rs2, SETEQ)),
+def : Pat<(XLenVT (strict_fsetccs FPR64INX:$rs1, FPR64INX:$rs2, SETEQ)),
(AND (XLenVT (FLE_D_INX $rs1, $rs2)),
(XLenVT (FLE_D_INX $rs2, $rs1)))>;
-def : Pat<(XLenVT (strict_fsetccs (f64 FPR64INX:$rs1), FPR64INX:$rs2, SETOEQ)),
+def : Pat<(XLenVT (strict_fsetccs FPR64INX:$rs1, FPR64INX:$rs2, SETOEQ)),
(AND (XLenVT (FLE_D_INX $rs1, $rs2)),
(XLenVT (FLE_D_INX $rs2, $rs1)))>;
// If both operands are the same, use a single FLE.
-def : Pat<(XLenVT (strict_fsetccs (f64 FPR64INX:$rs1), FPR64INX:$rs1, SETEQ)),
+def : Pat<(XLenVT (strict_fsetccs FPR64INX:$rs1, FPR64INX:$rs1, SETEQ)),
(FLE_D_INX $rs1, $rs1)>;
-def : Pat<(XLenVT (strict_fsetccs (f64 FPR64INX:$rs1), FPR64INX:$rs1, SETOEQ)),
+def : Pat<(XLenVT (strict_fsetccs FPR64INX:$rs1, FPR64INX:$rs1, SETOEQ)),
(FLE_D_INX $rs1, $rs1)>;
-def : PatSetCC<FPR64INX, any_fsetccs, SETLT, FLT_D_INX, f64>;
-def : PatSetCC<FPR64INX, any_fsetccs, SETOLT, FLT_D_INX, f64>;
-def : PatSetCC<FPR64INX, any_fsetccs, SETLE, FLE_D_INX, f64>;
-def : PatSetCC<FPR64INX, any_fsetccs, SETOLE, FLE_D_INX, f64>;
+def : PatSetCC<FPR64INX, any_fsetccs, SETLT, FLT_D_INX, f64, i64>;
+def : PatSetCC<FPR64INX, any_fsetccs, SETOLT, FLT_D_INX, f64, i64>;
+def : PatSetCC<FPR64INX, any_fsetccs, SETLE, FLE_D_INX, f64, i64>;
+def : PatSetCC<FPR64INX, any_fsetccs, SETOLE, FLE_D_INX, f64, i64>;
} // Predicates = [HasStdExtZdinx, IsRV64]
let Predicates = [HasStdExtZdinx, IsRV32] in {
// Match signaling FEQ_D
-def : Pat<(XLenVT (strict_fsetccs (f64 FPR64IN32X:$rs1), FPR64IN32X:$rs2, SETEQ)),
+def : Pat<(i32 (strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs2, SETEQ)),
(AND (XLenVT (FLE_D_IN32X $rs1, $rs2)),
(XLenVT (FLE_D_IN32X $rs2, $rs1)))>;
-def : Pat<(XLenVT (strict_fsetccs (f64 FPR64IN32X:$rs1), FPR64IN32X:$rs2, SETOEQ)),
+def : Pat<(i32 (strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs2, SETOEQ)),
(AND (XLenVT (FLE_D_IN32X $rs1, $rs2)),
(XLenVT (FLE_D_IN32X $rs2, $rs1)))>;
// If both operands are the same, use a single FLE.
-def : Pat<(XLenVT (strict_fsetccs (f64 FPR64IN32X:$rs1), FPR64IN32X:$rs1, SETEQ)),
+def : Pat<(i32 (strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs1, SETEQ)),
(FLE_D_IN32X $rs1, $rs1)>;
-def : Pat<(XLenVT (strict_fsetccs (f64 FPR64IN32X:$rs1), FPR64IN32X:$rs1, SETOEQ)),
+def : Pat<(i32 (strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs1, SETOEQ)),
(FLE_D_IN32X $rs1, $rs1)>;
-def : PatSetCC<FPR64IN32X, any_fsetccs, SETLT, FLT_D_IN32X, f64>;
-def : PatSetCC<FPR64IN32X, any_fsetccs, SETOLT, FLT_D_IN32X, f64>;
-def : PatSetCC<FPR64IN32X, any_fsetccs, SETLE, FLE_D_IN32X, f64>;
-def : PatSetCC<FPR64IN32X, any_fsetccs, SETOLE, FLE_D_IN32X, f64>;
+def : PatSetCC<FPR64IN32X, any_fsetccs, SETLT, FLT_D_IN32X, f64, i32>;
+def : PatSetCC<FPR64IN32X, any_fsetccs, SETOLT, FLT_D_IN32X, f64, i32>;
+def : PatSetCC<FPR64IN32X, any_fsetccs, SETLE, FLE_D_IN32X, f64, i32>;
+def : PatSetCC<FPR64IN32X, any_fsetccs, SETOLE, FLE_D_IN32X, f64, i32>;
} // Predicates = [HasStdExtZdinx, IsRV32]
let Predicates = [HasStdExtD] in {
@@ -511,7 +518,7 @@ def SplitF64Pseudo
} // Predicates = [HasStdExtD, NoStdExtZfa, IsRV32]
let Predicates = [HasStdExtZdinx, IsRV64] in {
-defm Select_FPR64INX : SelectCC_GPR_rrirr<FPR64INX, f64>;
+defm Select_FPR64INX : SelectCC_GPR_rrirr<FPR64INX, f64, i64>;
def PseudoFROUND_D_INX : PseudoFROUND<FPR64INX, f64>;
@@ -523,9 +530,9 @@ def : StPat<store, SD, GPR, f64>;
} // Predicates = [HasStdExtZdinx, IsRV64]
let Predicates = [HasStdExtZdinx, IsRV32] in {
-defm Select_FPR64IN32X : SelectCC_GPR_rrirr<FPR64IN32X, f64>;
+defm Select_FPR64IN32X : SelectCC_GPR_rrirr<FPR64IN32X, f64, i32>;
-def PseudoFROUND_D_IN32X : PseudoFROUND<FPR64IN32X, f64>;
+def PseudoFROUND_D_IN32X : PseudoFROUND<FPR64IN32X, f64, i32>;
/// Loads
let hasSideEffects = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 1 in
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
index fde030e..6571d99 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
@@ -131,7 +131,7 @@ def FPR32INX : RegisterOperand<GPRF32> {
// The DAGOperand can be unset if the predicates are not enough to define it.
class ExtInfo<string suffix, string space, list<Predicate> predicates,
ValueType primaryvt, DAGOperand primaryty, DAGOperand f32ty,
- DAGOperand f64ty, DAGOperand f16ty> {
+ DAGOperand f64ty, DAGOperand f16ty, ValueType intvt = XLenVT> {
list<Predicate> Predicates = predicates;
string Suffix = suffix;
string Space = space;
@@ -140,6 +140,7 @@ class ExtInfo<string suffix, string space, list<Predicate> predicates,
DAGOperand F32Ty = f32ty;
DAGOperand F64Ty = f64ty;
ValueType PrimaryVT = primaryvt;
+ ValueType IntVT = intvt;
}
def FExt : ExtInfo<"", "", [HasStdExtF], f32, FPR32, FPR32, ?, ?>;
@@ -314,9 +315,9 @@ multiclass FPCmp_rr_m<bits<7> funct7, bits<3> funct3, string opcodestr,
def Ext.Suffix : FPCmp_rr<funct7, funct3, opcodestr, Ext.PrimaryTy, Commutable>;
}
-class PseudoFROUND<DAGOperand Ty, ValueType vt>
+class PseudoFROUND<DAGOperand Ty, ValueType vt, ValueType intvt = XLenVT>
: Pseudo<(outs Ty:$rd), (ins Ty:$rs1, Ty:$rs2, ixlenimm:$rm),
- [(set Ty:$rd, (vt (riscv_fround Ty:$rs1, Ty:$rs2, timm:$rm)))]> {
+ [(set Ty:$rd, (vt (riscv_fround Ty:$rs1, Ty:$rs2, (intvt timm:$rm))))]> {
let hasSideEffects = 0;
let mayLoad = 0;
let mayStore = 0;
@@ -529,13 +530,14 @@ def fpimm0 : PatLeaf<(fpimm), [{ return N->isExactlyValue(+0.0); }]>;
/// Generic pattern classes
class PatSetCC<DAGOperand Ty, SDPatternOperator OpNode, CondCode Cond,
- RVInstCommon Inst, ValueType vt>
- : Pat<(XLenVT (OpNode (vt Ty:$rs1), Ty:$rs2, Cond)), (Inst $rs1, $rs2)>;
+ RVInstCommon Inst, ValueType vt, ValueType intvt = XLenVT>
+ : Pat<(intvt (OpNode (vt Ty:$rs1), Ty:$rs2, Cond)), (Inst $rs1, $rs2)>;
multiclass PatSetCC_m<SDPatternOperator OpNode, CondCode Cond,
RVInstCommon Inst, ExtInfo Ext> {
let Predicates = Ext.Predicates in
def Ext.Suffix : PatSetCC<Ext.PrimaryTy, OpNode, Cond,
- !cast<RVInstCommon>(Inst#Ext.Suffix), Ext.PrimaryVT>;
+ !cast<RVInstCommon>(Inst#Ext.Suffix),
+ Ext.PrimaryVT, Ext.IntVT>;
}
class PatFprFpr<SDPatternOperator OpNode, RVInstR Inst,
@@ -549,14 +551,15 @@ multiclass PatFprFpr_m<SDPatternOperator OpNode, RVInstR Inst,
}
class PatFprFprDynFrm<SDPatternOperator OpNode, RVInstRFrm Inst,
- DAGOperand RegTy, ValueType vt>
- : Pat<(OpNode (vt RegTy:$rs1), (vt RegTy:$rs2)), (Inst $rs1, $rs2, FRM_DYN)>;
+ DAGOperand RegTy, ValueType vt, ValueType intvt>
+ : Pat<(OpNode (vt RegTy:$rs1), (vt RegTy:$rs2)),
+ (Inst $rs1, $rs2, (intvt FRM_DYN))>;
multiclass PatFprFprDynFrm_m<SDPatternOperator OpNode, RVInstRFrm Inst,
ExtInfo Ext> {
let Predicates = Ext.Predicates in
def Ext.Suffix : PatFprFprDynFrm<OpNode,
!cast<RVInstRFrm>(Inst#Ext.Suffix),
- Ext.PrimaryTy, Ext.PrimaryVT>;
+ Ext.PrimaryTy, Ext.PrimaryVT, Ext.IntVT>;
}
/// Float conversion operations
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
index f2724c41..5e1d07a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
@@ -1571,35 +1571,42 @@ def : QCIMVCCIPat<SETUGE, QC_MVGEUI, uimm5nonzero>;
}
let Predicates = [HasVendorXqcicli, IsRV32] in {
-def : QCILICCPat<SETEQ, QC_LIEQ>;
-def : QCILICCPat<SETNE, QC_LINE>;
def : QCILICCPat<SETLT, QC_LILT>;
def : QCILICCPat<SETGE, QC_LIGE>;
def : QCILICCPat<SETULT, QC_LILTU>;
def : QCILICCPat<SETUGE, QC_LIGEU>;
-def : QCILICCIPat<SETEQ, QC_LIEQI, simm5>;
-def : QCILICCIPat<SETNE, QC_LINEI, simm5>;
def : QCILICCIPat<SETLT, QC_LILTI, simm5>;
def : QCILICCIPat<SETGE, QC_LIGEI, simm5>;
def : QCILICCIPat<SETULT, QC_LILTUI, uimm5>;
def : QCILICCIPat<SETUGE, QC_LIGEUI, uimm5>;
-def : QCILICCPatInv<SETNE, QC_LIEQ>;
-def : QCILICCPatInv<SETEQ, QC_LINE>;
def : QCILICCPatInv<SETGE, QC_LILT>;
def : QCILICCPatInv<SETLT, QC_LIGE>;
def : QCILICCPatInv<SETUGE, QC_LILTU>;
def : QCILICCPatInv<SETULT, QC_LIGEU>;
-def : QCILICCIPatInv<SETNE, QC_LIEQI, simm5>;
-def : QCILICCIPatInv<SETEQ, QC_LINEI, simm5>;
def : QCILICCIPatInv<SETGE, QC_LILTI, simm5>;
def : QCILICCIPatInv<SETLT, QC_LIGEI, simm5>;
def : QCILICCIPatInv<SETUGE, QC_LILTUI, uimm5>;
def : QCILICCIPatInv<SETULT, QC_LIGEUI, uimm5>;
} // Predicates = [HasVendorXqcicli, IsRV32]
+// Prioritize Xqcics over these patterns.
+let Predicates = [HasVendorXqcicli, NoVendorXqcics, IsRV32] in {
+def : QCILICCPat<SETEQ, QC_LIEQ>;
+def : QCILICCPat<SETNE, QC_LINE>;
+
+def : QCILICCIPat<SETEQ, QC_LIEQI, simm5>;
+def : QCILICCIPat<SETNE, QC_LINEI, simm5>;
+
+def : QCILICCPatInv<SETNE, QC_LIEQ>;
+def : QCILICCPatInv<SETEQ, QC_LINE>;
+
+def : QCILICCIPatInv<SETNE, QC_LIEQI, simm5>;
+def : QCILICCIPatInv<SETEQ, QC_LINEI, simm5>;
+} // Predicates = [HasVendorXqcicli, NoVendorXqcics, IsRV32]
+
let Predicates = [HasVendorXqcics, IsRV32] in {
// (SELECT X, Y, Z) is canonicalised to `(riscv_selectcc x, 0, NE, y, z)`.
// These exist to prioritise over the `Select_GPR_Using_CC_GPR` pattern.
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
index 014da99..52a2b29 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
@@ -69,16 +69,16 @@ def ZhinxminExt : ExtInfo<"_INX", "Zfinx",
f16, FPR16INX, FPR32INX, ?, FPR16INX>;
def ZhinxZdinxExt : ExtInfo<"_INX", "Zfinx",
[HasStdExtZhinx, HasStdExtZdinx, IsRV64],
- ?, ?, FPR32INX, FPR64INX, FPR16INX>;
+ ?, ?, FPR32INX, FPR64INX, FPR16INX, i64>;
def ZhinxminZdinxExt : ExtInfo<"_INX", "Zfinx",
[HasStdExtZhinxmin, HasStdExtZdinx, IsRV64],
- ?, ?, FPR32INX, FPR64INX, FPR16INX>;
+ ?, ?, FPR32INX, FPR64INX, FPR16INX, i64>;
def ZhinxZdinx32Ext : ExtInfo<"_IN32X", "ZdinxGPRPairRV32",
[HasStdExtZhinx, HasStdExtZdinx, IsRV32],
- ?, ?, FPR32INX, FPR64IN32X, FPR16INX>;
+ ?, ?, FPR32INX, FPR64IN32X, FPR16INX, i32>;
def ZhinxminZdinx32Ext : ExtInfo<"_IN32X", "ZdinxGPRPairRV32",
[HasStdExtZhinxmin, HasStdExtZdinx, IsRV32],
- ?, ?, FPR32INX, FPR64IN32X, FPR16INX>;
+ ?, ?, FPR32INX, FPR64IN32X, FPR16INX, i32>;
defvar ZfhExts = [ZfhExt, ZhinxExt];
defvar ZfhminExts = [ZfhminExt, ZhinxminExt];
diff --git a/llvm/lib/Target/SPIRV/CMakeLists.txt b/llvm/lib/Target/SPIRV/CMakeLists.txt
index 46afe03..eab7b21 100644
--- a/llvm/lib/Target/SPIRV/CMakeLists.txt
+++ b/llvm/lib/Target/SPIRV/CMakeLists.txt
@@ -36,6 +36,7 @@ add_llvm_target(SPIRVCodeGen
SPIRVMetadata.cpp
SPIRVModuleAnalysis.cpp
SPIRVStructurizer.cpp
+ SPIRVCombinerHelper.cpp
SPIRVPreLegalizer.cpp
SPIRVPreLegalizerCombiner.cpp
SPIRVPostLegalizer.cpp
diff --git a/llvm/lib/Target/SPIRV/SPIRVCombine.td b/llvm/lib/Target/SPIRV/SPIRVCombine.td
index 6f726e0..fde56c4 100644
--- a/llvm/lib/Target/SPIRV/SPIRVCombine.td
+++ b/llvm/lib/Target/SPIRV/SPIRVCombine.td
@@ -11,8 +11,8 @@ include "llvm/Target/GlobalISel/Combine.td"
def vector_length_sub_to_distance_lowering : GICombineRule <
(defs root:$root),
(match (wip_match_opcode G_INTRINSIC):$root,
- [{ return matchLengthToDistance(*${root}, MRI); }]),
- (apply [{ applySPIRVDistance(*${root}, MRI, B); }])
+ [{ return Helper.matchLengthToDistance(*${root}); }]),
+ (apply [{ Helper.applySPIRVDistance(*${root}); }])
>;
def SPIRVPreLegalizerCombiner
diff --git a/llvm/lib/Target/SPIRV/SPIRVCombinerHelper.cpp b/llvm/lib/Target/SPIRV/SPIRVCombinerHelper.cpp
new file mode 100644
index 0000000..267794c
--- /dev/null
+++ b/llvm/lib/Target/SPIRV/SPIRVCombinerHelper.cpp
@@ -0,0 +1,60 @@
+//===-- SPIRVCombinerHelper.cpp -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SPIRVCombinerHelper.h"
+#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
+#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
+#include "llvm/IR/IntrinsicsSPIRV.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+using namespace MIPatternMatch;
+
+SPIRVCombinerHelper::SPIRVCombinerHelper(
+ GISelChangeObserver &Observer, MachineIRBuilder &B, bool IsPreLegalize,
+ GISelValueTracking *VT, MachineDominatorTree *MDT, const LegalizerInfo *LI,
+ const SPIRVSubtarget &STI)
+ : CombinerHelper(Observer, B, IsPreLegalize, VT, MDT, LI), STI(STI) {}
+
+/// This match is part of a combine that
+/// rewrites length(X - Y) to distance(X, Y)
+/// (f32 (g_intrinsic length
+/// (g_fsub (vXf32 X) (vXf32 Y))))
+/// ->
+/// (f32 (g_intrinsic distance
+/// (vXf32 X) (vXf32 Y)))
+///
+bool SPIRVCombinerHelper::matchLengthToDistance(MachineInstr &MI) const {
+ if (MI.getOpcode() != TargetOpcode::G_INTRINSIC ||
+ cast<GIntrinsic>(MI).getIntrinsicID() != Intrinsic::spv_length)
+ return false;
+
+ // First operand of MI is `G_INTRINSIC` so start at operand 2.
+ Register SubReg = MI.getOperand(2).getReg();
+ MachineInstr *SubInstr = MRI.getVRegDef(SubReg);
+ if (SubInstr->getOpcode() != TargetOpcode::G_FSUB)
+ return false;
+
+ return true;
+}
+
+void SPIRVCombinerHelper::applySPIRVDistance(MachineInstr &MI) const {
+ // Extract the operands for X and Y from the match criteria.
+ Register SubDestReg = MI.getOperand(2).getReg();
+ MachineInstr *SubInstr = MRI.getVRegDef(SubDestReg);
+ Register SubOperand1 = SubInstr->getOperand(1).getReg();
+ Register SubOperand2 = SubInstr->getOperand(2).getReg();
+ Register ResultReg = MI.getOperand(0).getReg();
+
+ Builder.setInstrAndDebugLoc(MI);
+ Builder.buildIntrinsic(Intrinsic::spv_distance, ResultReg)
+ .addUse(SubOperand1)
+ .addUse(SubOperand2);
+
+ MI.eraseFromParent();
+}
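[Annotation] For reference, the identity this combine exploits is that the length of a difference equals the distance of the operands. A hypothetical scalar model (plain C++, not SPIR-V):

    #include <cmath>

    // length(X - Y) == distance(X, Y) for float vectors of N lanes.
    static float distanceRef(const float *X, const float *Y, int N) {
      float Sum = 0.0f;
      for (int I = 0; I != N; ++I) {
        float D = X[I] - Y[I]; // the G_FSUB feeding spv_length
        Sum += D * D;
      }
      return std::sqrt(Sum);
    }
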
diff --git a/llvm/lib/Target/SPIRV/SPIRVCombinerHelper.h b/llvm/lib/Target/SPIRV/SPIRVCombinerHelper.h
new file mode 100644
index 0000000..0b39d34
--- /dev/null
+++ b/llvm/lib/Target/SPIRV/SPIRVCombinerHelper.h
@@ -0,0 +1,38 @@
+//===-- SPIRVCombinerHelper.h -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// This contains common combine transformations that may be used in a combine
+/// pass.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_SPIRV_SPIRVCOMBINERHELPER_H
+#define LLVM_LIB_TARGET_SPIRV_SPIRVCOMBINERHELPER_H
+
+#include "SPIRVSubtarget.h"
+#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
+
+namespace llvm {
+class SPIRVCombinerHelper : public CombinerHelper {
+protected:
+ const SPIRVSubtarget &STI;
+
+public:
+ using CombinerHelper::CombinerHelper;
+ SPIRVCombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B,
+ bool IsPreLegalize, GISelValueTracking *VT,
+ MachineDominatorTree *MDT, const LegalizerInfo *LI,
+ const SPIRVSubtarget &STI);
+
+ bool matchLengthToDistance(MachineInstr &MI) const;
+ void applySPIRVDistance(MachineInstr &MI) const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_TARGET_SPIRV_SPIRVCOMBINERHELPER_H
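Unlike the base CombinerHelper, the subclass carries a const SPIRVSubtarget reference, so future SPIR-V combines can be gated on subtarget features. A hypothetical illustration of the kind of check this enables follows; canUseExtension is an existing SPIRVSubtarget query, while the match itself is invented for this sketch.

    #include "SPIRVSubtarget.h"
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/TargetOpcodes.h"
    using namespace llvm;

    // Invented example: only consider a G_BITREVERSE combine when the
    // SPV_KHR_bit_instructions extension may be emitted.
    static bool matchBitReverseForExtension(const MachineInstr &MI,
                                            const SPIRVSubtarget &STI) {
      if (!STI.canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions))
        return false;
      return MI.getOpcode() == TargetOpcode::G_BITREVERSE;
    }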
diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp
index e8c849e..28a1690 100644
--- a/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp
@@ -46,7 +46,6 @@
#include "SPIRVSubtarget.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
-#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
diff --git a/llvm/lib/Target/SPIRV/SPIRVMergeRegionExitTargets.cpp b/llvm/lib/Target/SPIRV/SPIRVMergeRegionExitTargets.cpp
index 20f03b0..60d39c9 100644
--- a/llvm/lib/Target/SPIRV/SPIRVMergeRegionExitTargets.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVMergeRegionExitTargets.cpp
@@ -19,7 +19,6 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
diff --git a/llvm/lib/Target/SPIRV/SPIRVPreLegalizerCombiner.cpp b/llvm/lib/Target/SPIRV/SPIRVPreLegalizerCombiner.cpp
index 8356751..48f4047 100644
--- a/llvm/lib/Target/SPIRV/SPIRVPreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVPreLegalizerCombiner.cpp
@@ -1,4 +1,3 @@
-
//===-- SPIRVPreLegalizerCombiner.cpp - combine legalization ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
@@ -13,24 +12,17 @@
//===----------------------------------------------------------------------===//
#include "SPIRV.h"
-#include "SPIRVTargetMachine.h"
+#include "SPIRVCombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/Combiner.h"
-#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
-#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
-#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
-#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
-#include "llvm/IR/IntrinsicsSPIRV.h"
#define GET_GICOMBINER_DEPS
#include "SPIRVGenPreLegalizeGICombiner.inc"
@@ -47,72 +39,9 @@ namespace {
#include "SPIRVGenPreLegalizeGICombiner.inc"
#undef GET_GICOMBINER_TYPES
-/// This match is part of a combine that
-/// rewrites length(X - Y) to distance(X, Y)
-/// (f32 (g_intrinsic length
-/// (g_fsub (vXf32 X) (vXf32 Y))))
-/// ->
-/// (f32 (g_intrinsic distance
-/// (vXf32 X) (vXf32 Y)))
-///
-bool matchLengthToDistance(MachineInstr &MI, MachineRegisterInfo &MRI) {
- if (MI.getOpcode() != TargetOpcode::G_INTRINSIC ||
- cast<GIntrinsic>(MI).getIntrinsicID() != Intrinsic::spv_length)
- return false;
-
- // First operand of MI is `G_INTRINSIC` so start at operand 2.
- Register SubReg = MI.getOperand(2).getReg();
- MachineInstr *SubInstr = MRI.getVRegDef(SubReg);
- if (!SubInstr || SubInstr->getOpcode() != TargetOpcode::G_FSUB)
- return false;
-
- return true;
-}
-void applySPIRVDistance(MachineInstr &MI, MachineRegisterInfo &MRI,
- MachineIRBuilder &B) {
-
- // Extract the operands for X and Y from the match criteria.
- Register SubDestReg = MI.getOperand(2).getReg();
- MachineInstr *SubInstr = MRI.getVRegDef(SubDestReg);
- Register SubOperand1 = SubInstr->getOperand(1).getReg();
- Register SubOperand2 = SubInstr->getOperand(2).getReg();
-
- // Remove the original `spv_length` instruction.
-
- Register ResultReg = MI.getOperand(0).getReg();
- DebugLoc DL = MI.getDebugLoc();
- MachineBasicBlock &MBB = *MI.getParent();
- MachineBasicBlock::iterator InsertPt = MI.getIterator();
-
- // Build the `spv_distance` intrinsic.
- MachineInstrBuilder NewInstr =
- BuildMI(MBB, InsertPt, DL, B.getTII().get(TargetOpcode::G_INTRINSIC));
- NewInstr
- .addDef(ResultReg) // Result register
- .addIntrinsicID(Intrinsic::spv_distance) // Intrinsic ID
- .addUse(SubOperand1) // Operand X
- .addUse(SubOperand2); // Operand Y
-
- SPIRVGlobalRegistry *GR =
- MI.getMF()->getSubtarget<SPIRVSubtarget>().getSPIRVGlobalRegistry();
- auto RemoveAllUses = [&](Register Reg) {
- SmallVector<MachineInstr *, 4> UsesToErase(
- llvm::make_pointer_range(MRI.use_instructions(Reg)));
-
- // calling eraseFromParent to early invalidates the iterator.
- for (auto *MIToErase : UsesToErase) {
- GR->invalidateMachineInstr(MIToErase);
- MIToErase->eraseFromParent();
- }
- };
- RemoveAllUses(SubDestReg); // remove all uses of FSUB Result
- GR->invalidateMachineInstr(SubInstr);
- SubInstr->eraseFromParent(); // remove FSUB instruction
-}
-
class SPIRVPreLegalizerCombinerImpl : public Combiner {
protected:
- const CombinerHelper Helper;
+ const SPIRVCombinerHelper Helper;
const SPIRVPreLegalizerCombinerImplRuleConfig &RuleConfig;
const SPIRVSubtarget &STI;
@@ -147,7 +76,7 @@ SPIRVPreLegalizerCombinerImpl::SPIRVPreLegalizerCombinerImpl(
const SPIRVSubtarget &STI, MachineDominatorTree *MDT,
const LegalizerInfo *LI)
: Combiner(MF, CInfo, TPC, &VT, CSEInfo),
- Helper(Observer, B, /*IsPreLegalize*/ true, &VT, MDT, LI),
+ Helper(Observer, B, /*IsPreLegalize*/ true, &VT, MDT, LI, STI),
RuleConfig(RuleConfig), STI(STI),
#define GET_GICOMBINER_CONSTRUCTOR_INITS
#include "SPIRVGenPreLegalizeGICombiner.inc"
diff --git a/llvm/lib/Target/SPIRV/SPIRVStripConvergentIntrinsics.cpp b/llvm/lib/Target/SPIRV/SPIRVStripConvergentIntrinsics.cpp
index 278ad7c..e621bcd44 100644
--- a/llvm/lib/Target/SPIRV/SPIRVStripConvergentIntrinsics.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVStripConvergentIntrinsics.cpp
@@ -14,7 +14,6 @@
#include "SPIRV.h"
#include "SPIRVSubtarget.h"
#include "SPIRVUtils.h"
-#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Transforms/Utils/Cloning.h"
diff --git a/llvm/lib/Target/SPIRV/SPIRVStructurizer.cpp b/llvm/lib/Target/SPIRV/SPIRVStructurizer.cpp
index 1811492..5b149f8 100644
--- a/llvm/lib/Target/SPIRV/SPIRVStructurizer.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVStructurizer.cpp
@@ -16,7 +16,6 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 9580ade..eea84a2 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -28,7 +28,6 @@
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/VectorUtils.h"
-#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -3634,7 +3633,7 @@ bool X86TargetLowering::preferScalarizeSplat(SDNode *N) const {
}
bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
- const SDNode *N, CombineLevel Level) const {
+ const SDNode *N) const {
assert(((N->getOpcode() == ISD::SHL &&
N->getOperand(0).getOpcode() == ISD::SRL) ||
(N->getOpcode() == ISD::SRL &&
@@ -3649,7 +3648,7 @@ bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
// the fold for non-splats yet.
return N->getOperand(1) == N->getOperand(0).getOperand(1);
}
- return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N, Level);
+ return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N);
}
bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
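For context on the hook whose signature changes above: shouldFoldConstantShiftPairToMask gates the DAG combine that rewrites a pair of opposing constant shifts into a single AND mask, and this patch merely drops the now-unused CombineLevel parameter. The underlying identity, shown in plain C++ as an illustration unrelated to the patch itself:

    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    // Shifting left then right by the same constant clears the high bits,
    // which is exactly what a single AND mask does.
    static uint32_t shiftPair(uint32_t X) { return (X << 8) >> 8; }
    static uint32_t maskForm(uint32_t X) { return X & 0x00FFFFFFu; }

    int main() {
      for (uint32_t X : {0u, 1u, 0xDEADBEEFu, 0xFFFFFFFFu})
        assert(shiftPair(X) == maskForm(X));
      return 0;
    }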
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index b55556a..e28b9c1 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1244,8 +1244,7 @@ namespace llvm {
getJumpConditionMergingParams(Instruction::BinaryOps Opc, const Value *Lhs,
const Value *Rhs) const override;
- bool shouldFoldConstantShiftPairToMask(const SDNode *N,
- CombineLevel Level) const override;
+ bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override;
bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override;
diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index 3bc46af..6dd43b2 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -547,7 +547,7 @@ unsigned X86TargetLowering::getAddressSpace() const {
static bool hasStackGuardSlotTLS(const Triple &TargetTriple) {
return TargetTriple.isOSGlibc() || TargetTriple.isOSFuchsia() ||
- (TargetTriple.isAndroid() && !TargetTriple.isAndroidVersionLT(17));
+ TargetTriple.isAndroid();
}
static Constant* SegmentOffset(IRBuilderBase &IRB,