author     Philip Reames <preames@rivosinc.com>     2025-07-21 11:07:41 -0700
committer  GitHub <noreply@github.com>              2025-07-21 11:07:41 -0700
commit     881b3fdfad30ca7e945fab4c68822f6bdecf06af (patch)
tree       1e41f4e6d63f2a068b2a7c2173d17c65b672ee01 /llvm/lib
parent     e202dba288edd47f1b370cc43aa8cd36a924e7c1 (diff)
[RISCV][IA] Support masked.load for deinterleaveN matching (#149556)
This builds on the recent series of API reworks to implement support for
deinterleaveN of masked.load. The goal is to enable masked interleave
groups in the vectorizer once all the codegen and costing pieces are in
place.
I considered including the shuffle-path support in this review as well
(since the RISC-V target-specific parts should be common), but decided to
separate it into its own review to keep the focus on one thing at a time.
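
For context, the IR pattern this enables is a masked.load whose single use is a vector.deinterleave intrinsic, where the wide mask is itself an interleave of a narrower per-member mask. A minimal, hypothetical sketch (names and vector types are illustrative only, not taken from the patch or its tests):

  ; The wide mask is interleave2 of the per-member mask %m (both operands the
  ; same), so the pass can recover %m; the load's only use is the deinterleave.
  %wide.mask = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(
                   <vscale x 4 x i1> %m, <vscale x 4 x i1> %m)
  %wide = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32.p0(
              ptr %p, i32 4, <vscale x 8 x i1> %wide.mask,
              <vscale x 8 x i32> poison)
  %deint = call { <vscale x 4 x i32>, <vscale x 4 x i32> }
               @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %wide)

With the RISC-V hooks below in place, a pair like this can be lowered to a masked segment load (a vlseg2 form) rather than a wide load followed by shuffles.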
Diffstat (limited to 'llvm/lib')
-rw-r--r--   llvm/lib/CodeGen/InterleavedAccessPass.cpp         42
-rw-r--r--   llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp   44
2 files changed, 59 insertions, 27 deletions
diff --git a/llvm/lib/CodeGen/InterleavedAccessPass.cpp b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
index d2b2edf..525ef32 100644
--- a/llvm/lib/CodeGen/InterleavedAccessPass.cpp
+++ b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
@@ -601,31 +601,47 @@ static Value *getMask(Value *WideMask, unsigned Factor,
 bool InterleavedAccessImpl::lowerDeinterleaveIntrinsic(
     IntrinsicInst *DI, SmallSetVector<Instruction *, 32> &DeadInsts) {
   Value *LoadedVal = DI->getOperand(0);
-  if (!LoadedVal->hasOneUse() || !isa<LoadInst, VPIntrinsic>(LoadedVal))
+  if (!LoadedVal->hasOneUse())
+    return false;
+
+  auto *LI = dyn_cast<LoadInst>(LoadedVal);
+  auto *II = dyn_cast<IntrinsicInst>(LoadedVal);
+  if (!LI && !II)
     return false;
 
   const unsigned Factor = getDeinterleaveIntrinsicFactor(DI->getIntrinsicID());
   assert(Factor && "unexpected deinterleave intrinsic");
 
   Value *Mask = nullptr;
-  if (auto *VPLoad = dyn_cast<VPIntrinsic>(LoadedVal)) {
-    if (VPLoad->getIntrinsicID() != Intrinsic::vp_load)
+  if (LI) {
+    if (!LI->isSimple())
       return false;
+
+    LLVM_DEBUG(dbgs() << "IA: Found a load with deinterleave intrinsic " << *DI
+                      << " and factor = " << Factor << "\n");
+  } else {
+    assert(II);
+
     // Check mask operand. Handle both all-true/false and interleaved mask.
-    Value *WideMask = VPLoad->getOperand(1);
-    Mask = getMask(WideMask, Factor, getDeinterleavedVectorType(DI));
-    if (!Mask)
+    Value *WideMask;
+    switch (II->getIntrinsicID()) {
+    default:
       return false;
+    case Intrinsic::vp_load:
+      WideMask = II->getOperand(1);
+      break;
+    case Intrinsic::masked_load:
+      WideMask = II->getOperand(2);
+      break;
+    }
 
-    LLVM_DEBUG(dbgs() << "IA: Found a vp.load with deinterleave intrinsic "
-                      << *DI << " and factor = " << Factor << "\n");
-  } else {
-    auto *LI = cast<LoadInst>(LoadedVal);
-    if (!LI->isSimple())
+    Mask = getMask(WideMask, Factor, getDeinterleavedVectorType(DI));
+    if (!Mask)
       return false;
 
-    LLVM_DEBUG(dbgs() << "IA: Found a load with deinterleave intrinsic " << *DI
-                      << " and factor = " << Factor << "\n");
+    LLVM_DEBUG(dbgs() << "IA: Found a vp.load or masked.load with deinterleave"
+                      << " intrinsic " << *DI << " and factor = "
+                      << Factor << "\n");
   }
 
   // Try and match this with target specific intrinsics.
diff --git a/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp b/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
index dd68a55..6de870c 100644
--- a/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
@@ -131,24 +131,40 @@ static bool getMemOperands(unsigned Factor, VectorType *VTy, Type *XLenTy,
              : Constant::getAllOnesValue(XLenTy);
     return true;
   }
-  auto *VPLdSt = cast<VPIntrinsic>(I);
-  assert((VPLdSt->getIntrinsicID() == Intrinsic::vp_load ||
-          VPLdSt->getIntrinsicID() == Intrinsic::vp_store) &&
-         "Unexpected intrinsic");
-  Ptr = VPLdSt->getMemoryPointerParam();
-  Alignment = VPLdSt->getPointerAlignment().value_or(
-      DL.getABITypeAlign(VTy->getElementType()));
+  if (auto *VPLdSt = dyn_cast<VPIntrinsic>(I)) {
+    assert((VPLdSt->getIntrinsicID() == Intrinsic::vp_load ||
+            VPLdSt->getIntrinsicID() == Intrinsic::vp_store) &&
+           "Unexpected intrinsic");
+    Ptr = VPLdSt->getMemoryPointerParam();
+    Alignment = VPLdSt->getPointerAlignment().value_or(
+        DL.getABITypeAlign(VTy->getElementType()));
+
+    assert(Mask && "vp.load and vp.store needs a mask!");
+
+    Value *WideEVL = VPLdSt->getVectorLengthParam();
+    // Conservatively check if EVL is a multiple of factor, otherwise some
+    // (trailing) elements might be lost after the transformation.
+    if (!isMultipleOfN(WideEVL, I->getDataLayout(), Factor))
+      return false;
 
-  assert(Mask && "vp.load and vp.store needs a mask!");
+    auto *FactorC = ConstantInt::get(WideEVL->getType(), Factor);
+    VL = Builder.CreateZExt(Builder.CreateExactUDiv(WideEVL, FactorC), XLenTy);
+    return true;
+  }
 
-  Value *WideEVL = VPLdSt->getVectorLengthParam();
-  // Conservatively check if EVL is a multiple of factor, otherwise some
-  // (trailing) elements might be lost after the transformation.
-  if (!isMultipleOfN(WideEVL, I->getDataLayout(), Factor))
+  auto *II = cast<IntrinsicInst>(I);
+  assert(II->getIntrinsicID() == Intrinsic::masked_load &&
+         "Unexpected intrinsic");
+  Ptr = II->getOperand(0);
+  Alignment = cast<ConstantInt>(II->getArgOperand(1))->getAlignValue();
+
+  if (!isa<UndefValue>(II->getOperand(3)))
     return false;
 
-  auto *FactorC = ConstantInt::get(WideEVL->getType(), Factor);
-  VL = Builder.CreateZExt(Builder.CreateExactUDiv(WideEVL, FactorC), XLenTy);
+  assert(Mask && "masked.load needs a mask!");
+
+  VL = isa<FixedVectorType>(VTy)
+           ? Builder.CreateElementCount(XLenTy, VTy->getElementCount())
+           : Constant::getAllOnesValue(XLenTy);
   return true;
 }
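
For reference (not part of the diff), this is the operand order of llvm.masked.load that the new masked-load branch of getMemOperands indexes into; a hypothetical fixed-width example:

  ; operand 0: pointer, operand 1: alignment, operand 2: mask,
  ; operand 3: passthru -- the passthru must be undef/poison, matching the
  ; isa<UndefValue> check above, since the lowering does not try to honor a
  ; non-undef passthru.
  declare <8 x i32> @llvm.masked.load.v8i32.p0(ptr, i32, <8 x i1>, <8 x i32>)
  %v = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr %p, i32 4,
                                                 <8 x i1> %mask,
                                                 <8 x i32> poison)

Unlike the vp.load path, which derives VL from the EVL operand (exactly divided by the factor), masked.load has no EVL, so VL is taken as the full per-member element count for fixed vectors, or an all-ones VL (VLMAX) for scalable ones.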