Diffstat (limited to 'llvm/lib/CodeGen/SelectionDAG')
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 87
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 13
2 files changed, 94 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index cf221bb..1ef5dc2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -23506,6 +23506,93 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
// inselt undef, InVal, EltNo --> build_vector < InVal, InVal, ... >
if (InVec.isUndef() && TLI.shouldSplatInsEltVarIndex(VT))
return DAG.getSplat(VT, DL, InVal);
+
+ // Extend this type to be byte-addressable
+ EVT OldVT = VT;
+ EVT EltVT = VT.getVectorElementType();
+ bool IsByteSized = EltVT.isByteSized();
+ if (!IsByteSized) {
+ EltVT =
+ EltVT.changeTypeToInteger().getRoundIntegerType(*DAG.getContext());
+ VT = VT.changeElementType(EltVT);
+ }
+
+ // Check if this operation will be handled the default way for its type.
+ auto IsTypeDefaultHandled = [this](EVT VT) {
+ return TLI.getTypeAction(*DAG.getContext(), VT) ==
+ TargetLowering::TypeSplitVector ||
+ TLI.isOperationExpand(ISD::INSERT_VECTOR_ELT, VT);
+ };
+
+ // Check if this operation is illegal and will be handled the default way,
+ // even after extending the type to be byte-addressable.
+ if (IsTypeDefaultHandled(OldVT) && IsTypeDefaultHandled(VT)) {
+ // For each dynamic insertelt, the default way will save the vector to
+ // the stack, store at an offset, and load the modified vector. This can
+ // dramatically increase code size if we have a chain of insertelts on a
+ // large vector: requiring O(V*C) stores/loads where V = length of
+ // vector and C is length of chain. If each insertelt is only fed into the
+ // next, the vector is write-only across this chain, and we can just
+ // save once before the chain and load after in O(V + C) operations.
+ SmallVector<SDNode *> Seq{N};
+ unsigned NumDynamic = 1;
+ while (true) {
+ SDValue InVec = Seq.back()->getOperand(0);
+ if (InVec.getOpcode() != ISD::INSERT_VECTOR_ELT)
+ break;
+ Seq.push_back(InVec.getNode());
+ NumDynamic += !isa<ConstantSDNode>(InVec.getOperand(2));
+ }
+
+ // It always and only makes sense to lower this sequence when we have more
+ // than one dynamic insertelt, since we will not have more than V constant
+ // insertelts, so we will be reducing the total number of stores+loads.
+ if (NumDynamic > 1) {
+ // In cases where the vector is illegal it will be broken down into
+ // parts and stored in parts - we should use the alignment for the
+ // smallest part.
+ Align SmallestAlign = DAG.getReducedAlign(VT, /*UseABI=*/false);
+ SDValue StackPtr =
+ DAG.CreateStackTemporary(VT.getStoreSize(), SmallestAlign);
+ auto &MF = DAG.getMachineFunction();
+ int FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
+ auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex);
+
+ // Save the vector to the stack
+ SDValue InVec = Seq.back()->getOperand(0);
+ if (!IsByteSized)
+ InVec = DAG.getNode(ISD::ANY_EXTEND, DL, VT, InVec);
+ SDValue Store = DAG.getStore(DAG.getEntryNode(), DL, InVec, StackPtr,
+ PtrInfo, SmallestAlign);
+
+ // Lower each dynamic insertelt to a store
+ for (SDNode *N : reverse(Seq)) {
+ SDValue Elmnt = N->getOperand(1);
+ SDValue Index = N->getOperand(2);
+
+ // Check if we have to extend the element type
+ if (!IsByteSized && Elmnt.getValueType().bitsLT(EltVT))
+ Elmnt = DAG.getNode(ISD::ANY_EXTEND, DL, EltVT, Elmnt);
+
+ // Store the new element. This may be larger than the vector element
+ // type, so use a truncating store.
+ SDValue EltPtr =
+ TLI.getVectorElementPointer(DAG, StackPtr, VT, Index);
+ EVT EltVT = Elmnt.getValueType();
+ Store = DAG.getTruncStore(
+ Store, DL, Elmnt, EltPtr, MachinePointerInfo::getUnknownStack(MF),
+ EltVT,
+ commonAlignment(SmallestAlign, EltVT.getFixedSizeInBits() / 8));
+ }
+
+ // Load the saved vector from the stack
+ SDValue Load =
+ DAG.getLoad(VT, DL, Store, StackPtr, PtrInfo, SmallestAlign);
+ SDValue LoadV = Load.getValue(0);
+ return IsByteSized ? LoadV : DAG.getAnyExtOrTrunc(LoadV, DL, OldVT);
+ }
+ }
+
return SDValue();
}
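
The combine added above targets chains of INSERT_VECTOR_ELT nodes whose indices are not compile-time constants. The default expansion spills the whole vector to the stack, stores the element at a variable offset, and reloads the whole vector for every link in the chain; the combine instead spills once before the chain, emits one element-sized store per insert, and reloads once afterwards. The sketch below is not part of the patch: it is an illustrative C++ source pattern (Clang/GCC vector extensions, made-up names) that typically lowers to such a chain of dynamic insertelements once the vector is kept in a register.

// Illustrative only: each assignment through a non-constant lane index
// usually becomes an insertelement with a variable index feeding the next
// one, i.e. exactly the chain the new combine rewrites.
typedef float v16f32 __attribute__((vector_size(64)));

v16f32 scatter4(v16f32 v, const int idx[4], const float x[4]) {
  for (int i = 0; i < 4; ++i)
    v[idx[i]] = x[i]; // dynamic-index insert into the 16-lane vector
  return v;
}

For a pattern like this, the old lowering performs a 64-byte spill and reload around every insert (the O(V*C) memory traffic the patch comment describes), while the combined form needs one spill, four scalar stores, and one reload, i.e. O(V + C) operations.
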
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 5fb7e63..431a810 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -2400,10 +2400,11 @@ SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node,
Results.push_back(Rem);
}
-/// Return true if sincos libcall is available.
+/// Return true if sincos or __sincos_stret libcall is available.
static bool isSinCosLibcallAvailable(SDNode *Node, const TargetLowering &TLI) {
- RTLIB::Libcall LC = RTLIB::getSINCOS(Node->getSimpleValueType(0).SimpleTy);
- return TLI.getLibcallName(LC) != nullptr;
+ MVT::SimpleValueType VT = Node->getSimpleValueType(0).SimpleTy;
+ return TLI.getLibcallImpl(RTLIB::getSINCOS(VT)) != RTLIB::Unsupported ||
+ TLI.getLibcallImpl(RTLIB::getSINCOS_STRET(VT)) != RTLIB::Unsupported;
}
/// Only issue sincos libcall if both sin and cos are needed.
@@ -3752,9 +3753,9 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
EVT VT = Node->getValueType(0);
// Turn fsin / fcos into ISD::FSINCOS node if there are a pair of fsin /
// fcos which share the same operand and both are used.
- if ((TLI.isOperationLegalOrCustom(ISD::FSINCOS, VT) ||
- isSinCosLibcallAvailable(Node, TLI))
- && useSinCos(Node)) {
+ if ((TLI.isOperationLegal(ISD::FSINCOS, VT) ||
+ isSinCosLibcallAvailable(Node, TLI)) &&
+ useSinCos(Node)) {
SDVTList VTs = DAG.getVTList(VT, VT);
Tmp1 = DAG.getNode(ISD::FSINCOS, dl, VTs, Node->getOperand(0));
if (Node->getOpcode() == ISD::FCOS)
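
The LegalizeDAG change has two parts: isSinCosLibcallAvailable() now also returns true when only the __sincos_stret variant of the libcall is provided (e.g. on Apple platforms), so fsin/fcos pairs can still be merged into ISD::FSINCOS there, and the legality check is tightened from isOperationLegalOrCustom() to isOperationLegal(). Below is a minimal sketch of the source pattern useSinCos() looks for; it is not from the patch, and the function and variable names are illustrative.

#include <cmath>

// With no-math-errno/fast-math semantics these calls typically lower to
// ISD::FCOS and ISD::FSIN of the same operand; since both results are used,
// ExpandNode() can fold them into a single ISD::FSINCOS node (or a sincos /
// __sincos_stret libcall) instead of two separate calls.
void polar_to_xy(double r, double theta, double &x, double &y) {
  x = r * std::cos(theta);
  y = r * std::sin(theta);
}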