diff options
author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2025-03-08 11:49:55 +0000 |
---|---|---|
committer | GitHub <noreply@github.com> | 2025-03-08 11:49:55 +0000 |
commit | 73e14de207a3aa0fa071fa56756e8e316edf5227 (patch) | |
tree | 7d8b53722b678b37282e9eca446fde000691a5af /llvm/lib | |
parent | f3390fca034d2762f87ac8057078d6e9661305ce (diff) | |
download | llvm-73e14de207a3aa0fa071fa56756e8e316edf5227.zip llvm-73e14de207a3aa0fa071fa56756e8e316edf5227.tar.gz llvm-73e14de207a3aa0fa071fa56756e8e316edf5227.tar.bz2 |
[X86] combineConcatVectorOps - recursively call combineConcatVectorOps instead of predicting when ops will freely concat (#130275)
The IsConcatFree helper is limited to estimating whether concatenating the subvector operands is beneficial; this patch replaces the FADD/FSUB/FMUL concatenation checks with a recursive call to combineConcatVectorOps to determine whether it will profitably concatenate further up the chain.
Other opcodes can be moved to using the CombineSubOperand helper in future patches.
Diffstat (limited to 'llvm/lib')
-rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 38 |
1 file changed, 30 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 57a4c6f..0602f50 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -41903,7 +41903,8 @@ static SDValue canonicalizeLaneShuffleWithRepeatedOps(SDValue V, static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT, ArrayRef<SDValue> Ops, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, - const X86Subtarget &Subtarget); + const X86Subtarget &Subtarget, + unsigned Depth = 0); /// Try to combine x86 target specific shuffles. static SDValue combineTargetShuffle(SDValue N, const SDLoc &DL, @@ -57791,7 +57792,8 @@ CastIntSETCCtoFP(MVT VT, ISD::CondCode CC, unsigned NumSignificantBitsLHS, static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT, ArrayRef<SDValue> Ops, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, - const X86Subtarget &Subtarget) { + const X86Subtarget &Subtarget, + unsigned Depth) { assert(Subtarget.hasAVX() && "AVX assumed for concat_vectors"); unsigned EltSizeInBits = VT.getScalarSizeInBits(); @@ -57803,6 +57805,9 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT, })) return getZeroVector(VT, Subtarget, DAG, DL); + if (Depth >= SelectionDAG::MaxRecursionDepth) + return SDValue(); // Limit search depth. 
+ SDValue Op0 = Ops[0]; bool IsSplat = llvm::all_equal(Ops); unsigned NumOps = Ops.size(); @@ -57933,6 +57938,20 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT, } return AllConstants || AllSubs; }; + auto CombineSubOperand = [&](MVT VT, ArrayRef<SDValue> SubOps, unsigned I) { + bool AllConstants = true; + SmallVector<SDValue> Subs; + for (SDValue SubOp : SubOps) { + SDValue BC = peekThroughBitcasts(SubOp.getOperand(I)); + AllConstants &= ISD::isBuildVectorOfConstantSDNodes(BC.getNode()) || + ISD::isBuildVectorOfConstantFPSDNodes(BC.getNode()); + Subs.push_back(SubOp.getOperand(I)); + } + if (AllConstants) + return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs); + return combineConcatVectorOps(DL, VT, Subs, DAG, DCI, Subtarget, + Depth + 1); + }; switch (Op0.getOpcode()) { case ISD::VECTOR_SHUFFLE: { @@ -58354,14 +58373,17 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT, case ISD::FADD: case ISD::FSUB: case ISD::FMUL: - if (!IsSplat && (IsConcatFree(VT, Ops, 0) || IsConcatFree(VT, Ops, 1)) && - (VT.is256BitVector() || - (VT.is512BitVector() && Subtarget.useAVX512Regs()))) { - return DAG.getNode(Op0.getOpcode(), DL, VT, - ConcatSubOperand(VT, Ops, 0), - ConcatSubOperand(VT, Ops, 1)); + if (!IsSplat && (VT.is256BitVector() || + (VT.is512BitVector() && Subtarget.useAVX512Regs()))) { + SDValue Concat0 = CombineSubOperand(VT, Ops, 0); + SDValue Concat1 = CombineSubOperand(VT, Ops, 1); + if (Concat0 || Concat1) + return DAG.getNode(Op0.getOpcode(), DL, VT, + Concat0 ? Concat0 : ConcatSubOperand(VT, Ops, 0), + Concat1 ? Concat1 : ConcatSubOperand(VT, Ops, 1)); } break; + // Always prefer to concatenate high latency FDIV instructions. case ISD::FDIV: if (!IsSplat && (VT.is256BitVector() || (VT.is512BitVector() && Subtarget.useAVX512Regs()))) { |