author    Philip Reames <preames@rivosinc.com>  2024-01-23 08:55:48 -0800
committer Philip Reames <listmail@philipreames.com>  2024-01-23 09:20:52 -0800
commit    51f9e982ed5c9a1f39c50d0501ab1bcb6ebf5de4
tree      e135bc460f0d838a7d350929107e2e024bd329d8
parent    8a45cec934697747ac3d3a18e75833e0058fe9a1
[RISCV] Use early return for select shuffle lowering [nfc]
Minor rework of the fallback case for two-argument shuffles in lowerVECTOR_SHUFFLE. We had some nominally common code which wasn't actually common, and which simplified significantly once specialized for whether or not we had a select.
-rw-r--r--  llvm/lib/Target/RISCV/RISCVISelLowering.cpp  | 64
1 file changed, 38 insertions(+), 26 deletions(-)
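As a quick orientation before the diff below, here is a rough sketch of the resulting control flow. This is illustrative pseudocode only, not the actual code in RISCVISelLowering.cpp; buildSelectOnlyMask and lowerViaVRGathers are hypothetical stand-ins for the inlined logic shown in the diff.

// Select-like shuffles are now handled and returned up front...
if (IsSelect) {
  // Build only the i1 select mask; no gather indices are needed here.
  SDValue SelectMask = buildSelectOnlyMask(Mask, SwapOps);  // hypothetical helper
  if (SwapOps)
    std::swap(V1, V2);
  return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
}
// ...so the fallback vrgather path below no longer carries IsSelect checks.
return lowerViaVRGathers(Mask, V1, V2);  // hypothetical helper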
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 76e8d21..ff730cd 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4852,41 +4852,56 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
-  SmallVector<SDValue> MaskVals;
-  // As a backup, shuffles can be lowered via a vrgather instruction, possibly
-  // merged with a second vrgather.
-  SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
-
   // By default we preserve the original operand order, and use a mask to
   // select LHS as true and RHS as false. However, since RVV vector selects may
   // feature splats but only on the LHS, we may choose to invert our mask and
   // instead select between RHS and LHS.
   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
-  bool InvertMask = IsSelect == SwapOps;
+
+  if (IsSelect) {
+    // Now construct the mask that will be used by the vselect operation.
+    SmallVector<SDValue> MaskVals;
+    for (int MaskIndex : Mask) {
+      bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ SwapOps;
+      MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
+    }
+
+    if (SwapOps)
+      std::swap(V1, V2);
+
+    assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
+    MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
+    SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
+    return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
+  }
+
+
+  // As a backup, shuffles can be lowered via a vrgather instruction, possibly
+  // merged with a second vrgather.
+  SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
   // Keep a track of which non-undef indices are used by each LHS/RHS shuffle
   // half.
   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
-  // Now construct the mask that will be used by the vselect or blended
-  // vrgather operation. For vrgathers, construct the appropriate indices into
-  // each vector.
+  SmallVector<SDValue> MaskVals;
+
+  // Now construct the mask that will be used by the blended vrgather operation.
+  // Construct the appropriate indices into each vector.
   for (int MaskIndex : Mask) {
-    bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
+    bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ !SwapOps;
     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
-    if (!IsSelect) {
-      bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
-      GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
-                                     ? DAG.getConstant(MaskIndex, DL, XLenVT)
-                                     : DAG.getUNDEF(XLenVT));
-      GatherIndicesRHS.push_back(
-          IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
-                            : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
-      if (IsLHSOrUndefIndex && MaskIndex >= 0)
-        ++LHSIndexCounts[MaskIndex];
-      if (!IsLHSOrUndefIndex)
-        ++RHSIndexCounts[MaskIndex - NumElts];
-    }
+    bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
+    GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
+                                   ? DAG.getConstant(MaskIndex, DL, XLenVT)
+                                   : DAG.getUNDEF(XLenVT));
+    GatherIndicesRHS.push_back(
+        IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
+                          : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
+    if (IsLHSOrUndefIndex && MaskIndex >= 0)
+      ++LHSIndexCounts[MaskIndex];
+    if (!IsLHSOrUndefIndex)
+      ++RHSIndexCounts[MaskIndex - NumElts];
   }
   if (SwapOps) {
@@ -4898,9 +4913,6 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
-  if (IsSelect)
-    return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
-
   // We might be able to express the shuffle as a bitrotate. But even if we
   // don't have Zvkb and have to expand, the expanded sequence of approx. 2
   // shifts and a vor will have a higher throughput than a vrgather.