Diffstat (limited to 'llvm/lib/Target/X86')
 llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp | 48
 llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp       |  9
 llvm/lib/Target/X86/X86ISelLowering.cpp              | 16
 3 files changed, 44 insertions(+), 29 deletions(-)
diff --git a/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp b/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
index 100f1ec..53ec712 100644
--- a/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
+++ b/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
@@ -1879,28 +1879,34 @@ bool X86InstructionSelector::selectSelect(MachineInstr &I,
unsigned OpCmp;
LLT Ty = MRI.getType(DstReg);
- switch (Ty.getSizeInBits()) {
- default:
- return false;
- case 8:
- OpCmp = X86::CMOV_GR8;
- break;
- case 16:
- OpCmp = STI.canUseCMOV() ? X86::CMOV16rr : X86::CMOV_GR16;
- break;
- case 32:
- OpCmp = STI.canUseCMOV() ? X86::CMOV32rr : X86::CMOV_GR32;
- break;
- case 64:
- assert(STI.is64Bit() && STI.canUseCMOV());
- OpCmp = X86::CMOV64rr;
- break;
+ if (Ty.getSizeInBits() == 80) {
+ BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(X86::CMOVE_Fp80),
+ DstReg)
+ .addReg(Sel.getTrueReg())
+ .addReg(Sel.getFalseReg());
+ } else {
+ switch (Ty.getSizeInBits()) {
+ default:
+ return false;
+ case 8:
+ OpCmp = X86::CMOV_GR8;
+ break;
+ case 16:
+ OpCmp = STI.canUseCMOV() ? X86::CMOV16rr : X86::CMOV_GR16;
+ break;
+ case 32:
+ OpCmp = STI.canUseCMOV() ? X86::CMOV32rr : X86::CMOV_GR32;
+ break;
+ case 64:
+ assert(STI.is64Bit() && STI.canUseCMOV());
+ OpCmp = X86::CMOV64rr;
+ break;
+ }
+ BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(OpCmp), DstReg)
+ .addReg(Sel.getTrueReg())
+ .addReg(Sel.getFalseReg())
+ .addImm(X86::COND_E);
}
- BuildMI(*Sel.getParent(), Sel, Sel.getDebugLoc(), TII.get(OpCmp), DstReg)
- .addReg(Sel.getTrueReg())
- .addReg(Sel.getFalseReg())
- .addImm(X86::COND_E);
-
const TargetRegisterClass *DstRC = getRegClass(Ty, DstReg, MRI);
if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
LLVM_DEBUG(dbgs() << "Failed to constrain CMOV\n");
diff --git a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
index 28fa2cd..e792b1b 100644
--- a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
+++ b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.cpp
@@ -575,10 +575,13 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
// todo: vectors and address spaces
getActionDefinitionsBuilder(G_SELECT)
- .legalFor({{s8, s32}, {s16, s32}, {s32, s32}, {s64, s32}, {p0, s32}})
+ .legalFor({{s16, s32}, {s32, s32}, {p0, s32}})
+ .legalFor(!HasCMOV, {{s8, s32}})
+ .legalFor(Is64Bit, {{s64, s32}})
+ .legalFor(UseX87, {{s80, s32}})
+ .clampScalar(1, s32, s32)
.widenScalarToNextPow2(0, /*Min=*/8)
- .clampScalar(0, HasCMOV ? s16 : s8, sMaxScalar)
- .clampScalar(1, s32, s32);
+ .clampScalar(0, HasCMOV ? s16 : s8, sMaxScalar);
// memory intrinsics
getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();
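
The G_SELECT rules above can be read as a predicate on the destination type once the condition has already been clamped to s32. The following standalone sketch is not LLVM API; the function name, parameters, and structure are made up purely to restate which sizes end up legal under the HasCMOV/Is64Bit/UseX87 flags used in the diff:

// A rough standalone restatement (illustrative only, not LLVM code) of which
// G_SELECT destination sizes the rules above mark legal, assuming the
// condition type has already been clamped to s32.
#include <cstdio>

static bool isLegalSelect(unsigned DstBits, bool HasCMOV, bool Is64Bit,
                          bool UseX87) {
  switch (DstBits) {
  case 8:  return !HasCMOV;  // s8 stays legal only when CMOV is unavailable
  case 16:
  case 32: return true;      // s16/s32 (and p0) are always legal
  case 64: return Is64Bit;   // s64 needs a 64-bit target
  case 80: return UseX87;    // s80 needs x87 (selected as CMOVE_Fp80 above)
  default: return false;     // other sizes are clamped/widened first
  }
}

int main() {
  std::printf("s80 legal with x87:  %d\n", isLegalSelect(80, true, true, true));
  std::printf("s8 legal with CMOV:  %d\n", isLegalSelect(8, true, true, true));
  return 0;
}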
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 2feee05..b5f8ee5 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -41846,7 +41846,7 @@ static SDValue combineCommutableSHUFP(SDValue N, MVT VT, const SDLoc &DL,
if (!X86::mayFoldLoad(peekThroughOneUseBitcasts(N0), Subtarget) ||
X86::mayFoldLoad(peekThroughOneUseBitcasts(N1), Subtarget))
return SDValue();
- Imm = ((Imm & 0x0F) << 4) | ((Imm & 0xF0) >> 4);
+ Imm = llvm::rotl<uint8_t>(Imm, 4);
return DAG.getNode(X86ISD::SHUFP, DL, VT, N1, N0,
DAG.getTargetConstant(Imm, DL, MVT::i8));
};
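
The rotl change above is behavior-preserving: rotating an 8-bit immediate left by four bits swaps its two nibbles, which is exactly what the replaced mask-and-shift expression computed. A minimal standalone check of that equivalence, using std::rotl from <bit> (C++20) as a stand-in for llvm::rotl:

// Verifies that an 8-bit rotate-left by 4 equals the old nibble-swap
// expression for every possible immediate value. Not LLVM code.
#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  for (unsigned V = 0; V < 256; ++V) {
    uint8_t Imm = static_cast<uint8_t>(V);
    uint8_t Swapped =
        static_cast<uint8_t>(((Imm & 0x0F) << 4) | ((Imm & 0xF0) >> 4));
    assert(std::rotl(Imm, 4) == Swapped);
  }
  return 0;
}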
@@ -44813,10 +44813,16 @@ bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
}
case X86ISD::PCMPGT:
// icmp sgt(0, R) == ashr(R, BitWidth-1).
- // iff we only need the sign bit then we can use R directly.
- if (OriginalDemandedBits.isSignMask() &&
- ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
- return TLO.CombineTo(Op, Op.getOperand(1));
+ if (ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode())) {
+ // iff we only need the signbit then we can use R directly.
+ if (OriginalDemandedBits.isSignMask())
+ return TLO.CombineTo(Op, Op.getOperand(1));
+ // otherwise we just need R's signbit for the comparison.
+ APInt SignMask = APInt::getSignMask(BitWidth);
+ if (SimplifyDemandedBits(Op.getOperand(1), SignMask, OriginalDemandedElts,
+ Known, TLO, Depth + 1))
+ return true;
+ }
break;
case X86ISD::MOVMSK: {
SDValue Src = Op.getOperand(0);
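
The PCMPGT change relies on the identity stated in the comment: with an all-zeros LHS, each lane computes (0 > R), which matches an arithmetic shift of R by BitWidth-1 and therefore depends only on R's sign bit, so SimplifyDemandedBits can be called on the RHS with just the sign mask. A small scalar sketch of that identity for one 32-bit lane (plain C++, not LLVM code; C++20 guarantees the arithmetic right shift used here):

// One lane of PCMPGT: all-ones when A > B (signed compare), all-zeros otherwise.
#include <cassert>
#include <cstdint>

static int32_t pcmpgtLane(int32_t A, int32_t B) { return A > B ? -1 : 0; }

int main() {
  for (int32_t R : {0, 1, -1, 42, -42, INT32_MIN, INT32_MAX}) {
    int32_t AsShift = R >> 31; // arithmetic shift replicates the sign bit
    assert(pcmpgtLane(0, R) == AsShift);
    // Flipping a bit other than the sign bit never changes the result,
    // i.e. only R's sign bit is demanded.
    assert(pcmpgtLane(0, R ^ 1) == AsShift);
  }
  return 0;
}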