Diffstat (limited to 'llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp')
-rw-r--r-- | llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 126
1 file changed, 92 insertions, 34 deletions
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 11e869a..d70e96938 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -4075,18 +4075,11 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
   unsigned BitWidth = VT.getScalarSizeInBits();
   SDLoc DL(N);
 
-  auto PeekThroughFreeze = [](SDValue N) {
-    if (N->getOpcode() == ISD::FREEZE && N.hasOneUse())
-      return N->getOperand(0);
-    return N;
-  };
-
   if (SDValue V = foldSubCtlzNot<EmptyMatchContext>(N, DAG))
     return V;
 
   // fold (sub x, x) -> 0
-  // FIXME: Refactor this and xor and other similar operations together.
-  if (PeekThroughFreeze(N0) == PeekThroughFreeze(N1))
+  if (N0 == N1)
     return tryFoldToZero(DL, TLI, VT, DAG, LegalOperations);
 
   // fold (sub c1, c2) -> c3
@@ -6499,19 +6492,21 @@ static unsigned getMinMaxOpcodeForFP(SDValue Operand1, SDValue Operand2,
   // It is safe to use FMINNUM_IEEE/FMAXNUM_IEEE if all the operands
   // are non NaN values.
   if (((CC == ISD::SETLT || CC == ISD::SETLE) && (OrAndOpcode == ISD::OR)) ||
-      ((CC == ISD::SETGT || CC == ISD::SETGE) && (OrAndOpcode == ISD::AND)))
+      ((CC == ISD::SETGT || CC == ISD::SETGE) && (OrAndOpcode == ISD::AND))) {
     return arebothOperandsNotNan(Operand1, Operand2, DAG) &&
                    isFMAXNUMFMINNUM_IEEE
                ? ISD::FMINNUM_IEEE
                : ISD::DELETED_NODE;
-  else if (((CC == ISD::SETGT || CC == ISD::SETGE) &&
-            (OrAndOpcode == ISD::OR)) ||
-           ((CC == ISD::SETLT || CC == ISD::SETLE) &&
-            (OrAndOpcode == ISD::AND)))
+  }
+
+  if (((CC == ISD::SETGT || CC == ISD::SETGE) && (OrAndOpcode == ISD::OR)) ||
+      ((CC == ISD::SETLT || CC == ISD::SETLE) && (OrAndOpcode == ISD::AND))) {
     return arebothOperandsNotNan(Operand1, Operand2, DAG) &&
                    isFMAXNUMFMINNUM_IEEE
                ? ISD::FMAXNUM_IEEE
                : ISD::DELETED_NODE;
+  }
+
   // Both FMINNUM/FMAXNUM and FMINNUM_IEEE/FMAXNUM_IEEE handle quiet
   // NaNs in the same way. But, FMINNUM/FMAXNUM and FMINNUM_IEEE/
   // FMAXNUM_IEEE handle signaling NaNs differently. If we cannot prove
@@ -6521,24 +6516,24 @@ static unsigned getMinMaxOpcodeForFP(SDValue Operand1, SDValue Operand2,
   // we can prove that we do not have any sNaNs, then we can do the
   // optimization using FMINNUM_IEEE/FMAXNUM_IEEE for the following
   // cases.
-  else if (((CC == ISD::SETOLT || CC == ISD::SETOLE) &&
-            (OrAndOpcode == ISD::OR)) ||
-           ((CC == ISD::SETUGT || CC == ISD::SETUGE) &&
-            (OrAndOpcode == ISD::AND)))
+  if (((CC == ISD::SETOLT || CC == ISD::SETOLE) && (OrAndOpcode == ISD::OR)) ||
+      ((CC == ISD::SETUGT || CC == ISD::SETUGE) && (OrAndOpcode == ISD::AND))) {
     return isFMAXNUMFMINNUM ? ISD::FMINNUM
-           : arebothOperandsNotSNan(Operand1, Operand2, DAG) &&
-                   isFMAXNUMFMINNUM_IEEE
-               ? ISD::FMINNUM_IEEE
-               : ISD::DELETED_NODE;
-  else if (((CC == ISD::SETOGT || CC == ISD::SETOGE) &&
-            (OrAndOpcode == ISD::OR)) ||
-           ((CC == ISD::SETULT || CC == ISD::SETULE) &&
-            (OrAndOpcode == ISD::AND)))
+                             : arebothOperandsNotSNan(Operand1, Operand2, DAG) &&
+                                       isFMAXNUMFMINNUM_IEEE
+                                   ? ISD::FMINNUM_IEEE
+                                   : ISD::DELETED_NODE;
+  }
+
+  if (((CC == ISD::SETOGT || CC == ISD::SETOGE) && (OrAndOpcode == ISD::OR)) ||
+      ((CC == ISD::SETULT || CC == ISD::SETULE) && (OrAndOpcode == ISD::AND))) {
     return isFMAXNUMFMINNUM ? ISD::FMAXNUM
-           : arebothOperandsNotSNan(Operand1, Operand2, DAG) &&
-                   isFMAXNUMFMINNUM_IEEE
-               ? ISD::FMAXNUM_IEEE
-               : ISD::DELETED_NODE;
+                             : arebothOperandsNotSNan(Operand1, Operand2, DAG) &&
+                                       isFMAXNUMFMINNUM_IEEE
+                                   ? ISD::FMAXNUM_IEEE
+                                   : ISD::DELETED_NODE;
+  }
+
   return ISD::DELETED_NODE;
 }
 
@@ -13184,14 +13179,14 @@ static SDValue combineVSelectWithAllOnesOrZeros(SDValue Cond, SDValue TVal,
   // select Cond, -1, x → or Cond, x
   if (IsTAllOne) {
-    SDValue X = DAG.getBitcast(CondVT, FVal);
+    SDValue X = DAG.getBitcast(CondVT, DAG.getFreeze(FVal));
     SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, X);
     return DAG.getBitcast(VT, Or);
   }
 
   // select Cond, x, 0 → and Cond, x
   if (IsFAllZero) {
-    SDValue X = DAG.getBitcast(CondVT, TVal);
+    SDValue X = DAG.getBitcast(CondVT, DAG.getFreeze(TVal));
     SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, X);
     return DAG.getBitcast(VT, And);
   }
 
@@ -13199,7 +13194,7 @@ static SDValue combineVSelectWithAllOnesOrZeros(SDValue Cond, SDValue TVal,
   // select Cond, 0, x -> and not(Cond), x
   if (IsTAllZero &&
       (isBitwiseNot(peekThroughBitcasts(Cond)) || TLI.hasAndNot(Cond))) {
-    SDValue X = DAG.getBitcast(CondVT, FVal);
+    SDValue X = DAG.getBitcast(CondVT, DAG.getFreeze(FVal));
     SDValue And =
         DAG.getNode(ISD::AND, DL, CondVT, DAG.getNOT(DL, Cond, CondVT), X);
     return DAG.getBitcast(VT, And);
@@ -16754,6 +16749,17 @@ SDValue DAGCombiner::visitFREEZE(SDNode *N) {
   if (DAG.isGuaranteedNotToBeUndefOrPoison(N0, /*PoisonOnly*/ false))
     return N0;
 
+  // If we have frozen and unfrozen users of N0, update so everything uses N.
+  if (!N0.isUndef() && !N0.hasOneUse()) {
+    SDValue FrozenN0(N, 0);
+    DAG.ReplaceAllUsesOfValueWith(N0, FrozenN0);
+    // ReplaceAllUsesOfValueWith will have also updated the use in N, thus
+    // creating a cycle in a DAG. Let's undo that by mutating the freeze.
+    assert(N->getOperand(0) == FrozenN0 && "Expected cycle in DAG");
+    DAG.UpdateNodeOperands(N, N0);
+    return FrozenN0;
+  }
+
   // We currently avoid folding freeze over SRA/SRL, due to the problems seen
   // with (freeze (assert ext)) blocking simplifications of SRA/SRL. See for
   // example https://reviews.llvm.org/D136529#4120959.
@@ -16807,8 +16813,7 @@ SDValue DAGCombiner::visitFREEZE(SDNode *N) {
   SmallSet<SDValue, 8> MaybePoisonOperands;
   SmallVector<unsigned, 8> MaybePoisonOperandNumbers;
   for (auto [OpNo, Op] : enumerate(N0->ops())) {
-    if (DAG.isGuaranteedNotToBeUndefOrPoison(Op, /*PoisonOnly*/ false,
-                                             /*Depth*/ 1))
+    if (DAG.isGuaranteedNotToBeUndefOrPoison(Op, /*PoisonOnly=*/false))
       continue;
     bool HadMaybePoisonOperands = !MaybePoisonOperands.empty();
     bool IsNewMaybePoisonOperand = MaybePoisonOperands.insert(Op).second;
@@ -22534,6 +22539,56 @@ SDValue DAGCombiner::visitATOMIC_STORE(SDNode *N) {
   return SDValue();
 }
 
+static SDValue foldToMaskedStore(StoreSDNode *Store, SelectionDAG &DAG,
+                                 const SDLoc &Dl) {
+  if (!Store->isSimple() || !ISD::isNormalStore(Store))
+    return SDValue();
+
+  SDValue StoredVal = Store->getValue();
+  SDValue StorePtr = Store->getBasePtr();
+  SDValue StoreOffset = Store->getOffset();
+  EVT VT = Store->getMemoryVT();
+  unsigned AddrSpace = Store->getAddressSpace();
+  Align Alignment = Store->getAlign();
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+
+  if (!TLI.isOperationLegalOrCustom(ISD::MSTORE, VT) ||
+      !TLI.allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment))
+    return SDValue();
+
+  SDValue Mask, OtherVec, LoadCh;
+  unsigned LoadPos;
+  if (sd_match(StoredVal,
+               m_VSelect(m_Value(Mask), m_Value(OtherVec),
+                         m_Load(m_Value(LoadCh), m_Specific(StorePtr),
+                                m_Specific(StoreOffset))))) {
+    LoadPos = 2;
+  } else if (sd_match(StoredVal,
+                      m_VSelect(m_Value(Mask),
+                                m_Load(m_Value(LoadCh), m_Specific(StorePtr),
+                                       m_Specific(StoreOffset)),
+                                m_Value(OtherVec)))) {
+    LoadPos = 1;
+  } else {
+    return SDValue();
+  }
+
+  auto *Load = cast<LoadSDNode>(StoredVal.getOperand(LoadPos));
+  if (!Load->isSimple() || !ISD::isNormalLoad(Load) ||
+      Load->getAddressSpace() != AddrSpace)
+    return SDValue();
+
+  if (!Store->getChain().reachesChainWithoutSideEffects(LoadCh))
+    return SDValue();
+
+  if (LoadPos == 1)
+    Mask = DAG.getNOT(Dl, Mask, Mask.getValueType());
+
+  return DAG.getMaskedStore(Store->getChain(), Dl, OtherVec, StorePtr,
+                            StoreOffset, Mask, VT, Store->getMemOperand(),
+                            Store->getAddressingMode());
+}
+
 SDValue DAGCombiner::visitSTORE(SDNode *N) {
   StoreSDNode *ST = cast<StoreSDNode>(N);
   SDValue Chain = ST->getChain();
@@ -22768,6 +22823,9 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
   if (SDValue NewSt = splitMergedValStore(ST))
     return NewSt;
 
+  if (SDValue MaskedStore = foldToMaskedStore(ST, DAG, SDLoc(N)))
+    return MaskedStore;
+
   return ReduceLoadOpStoreWidth(N);
 }