Diffstat (limited to 'llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp')
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp | 32
1 file changed, 4 insertions(+), 28 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp b/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
index 0565fcd..17e2f01 100644
--- a/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInterleavedAccess.cpp
@@ -32,7 +32,7 @@ bool RISCVTargetLowering::isLegalInterleavedAccessType(
   if (!isTypeLegal(VT))
     return false;
 
-  if (!isLegalElementTypeForRVV(VT.getScalarType()) ||
+  if (!isLegalLoadStoreElementTypeForRVV(VT.getScalarType()) ||
       !allowsMemoryAccessForAlignment(VTy->getContext(), DL, VT, AddrSpace,
                                       Alignment))
     return false;
@@ -216,29 +216,6 @@ bool RISCVTargetLowering::lowerInterleavedLoad(
   if (!isLegalInterleavedAccessType(VTy, Factor, Alignment, AS, DL))
     return false;
 
-  // If the segment load is going to be performed segment at a time anyways
-  // and there's only one element used, use a strided load instead. This
-  // will be equally fast, and create less vector register pressure.
-  if (Indices.size() == 1 && !Subtarget.hasOptimizedSegmentLoadStore(Factor)) {
-    unsigned ScalarSizeInBytes = DL.getTypeStoreSize(VTy->getElementType());
-    Value *Stride = ConstantInt::get(XLenTy, Factor * ScalarSizeInBytes);
-    Value *Offset = ConstantInt::get(XLenTy, Indices[0] * ScalarSizeInBytes);
-    Value *BasePtr = Builder.CreatePtrAdd(Ptr, Offset);
-    // Note: Same VL as above, but i32 not xlen due to signature of
-    // vp.strided.load
-    VL = Builder.CreateElementCount(Builder.getInt32Ty(),
-                                    VTy->getElementCount());
-    CallInst *CI =
-        Builder.CreateIntrinsic(Intrinsic::experimental_vp_strided_load,
-                                {VTy, BasePtr->getType(), Stride->getType()},
-                                {BasePtr, Stride, Mask, VL});
-    Alignment = commonAlignment(Alignment, Indices[0] * ScalarSizeInBytes);
-    CI->addParamAttr(0,
-                     Attribute::getWithAlignment(CI->getContext(), Alignment));
-    Shuffles[0]->replaceAllUsesWith(CI);
-    return true;
-  };
-
   CallInst *VlsegN = Builder.CreateIntrinsic(
       FixedVlsegIntrIds[Factor - 2], {VTy, PtrTy, XLenTy}, {Ptr, Mask, VL});
 
@@ -302,10 +279,9 @@ bool RISCVTargetLowering::lowerInterleavedStore(Instruction *Store,
     Value *Stride = ConstantInt::get(XLenTy, Factor * ScalarSizeInBytes);
     Value *Offset = ConstantInt::get(XLenTy, Index * ScalarSizeInBytes);
     Value *BasePtr = Builder.CreatePtrAdd(Ptr, Offset);
-    // Note: Same VL as above, but i32 not xlen due to signature of
-    // vp.strided.store
-    VL = Builder.CreateElementCount(Builder.getInt32Ty(),
-                                    VTy->getElementCount());
+    // For rv64, need to truncate i64 to i32 to match signature. As VL is at
+    // most the number of active lanes (which is bounded by i32) this is safe.
+    VL = Builder.CreateTrunc(VL, Builder.getInt32Ty());
     CallInst *CI =
         Builder.CreateIntrinsic(Intrinsic::experimental_vp_strided_store,
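
As a side note (not part of the commit), a minimal sketch of what the strided-store VL change amounts to, assuming the same LLVM IRBuilder API used in the hunk above; the helper name truncateVLToI32 is invented for illustration.

#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Before the patch, VL was rebuilt as an i32 element count:
//   VL = Builder.CreateElementCount(Builder.getInt32Ty(),
//                                   VTy->getElementCount());
// After the patch, the XLen-typed VL computed earlier is reused and truncated
// to i32, which is safe because VL never exceeds the number of active lanes.
static Value *truncateVLToI32(IRBuilder<> &Builder, Value *VL) {
  return Builder.CreateTrunc(VL, Builder.getInt32Ty());
}

On RV32, where XLenTy is already i32, CreateTrunc folds to a no-op and returns VL unchanged; on RV64 it emits a single trunc rather than recomputing the element count.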