author     Dawid Jurczak <dawid_jurek@vp.pl>   2021-08-05 12:30:19 +0200
committer  Dawid Jurczak <dawid_jurek@vp.pl>   2021-08-05 16:08:32 +0200
commit     f8cdde71950760675f81fa89fcd603b4391908a4
tree       891a4b28060659dfa2e5e8e7762e496bc9530f6b
parent     d0c3b61498ecc04c9d95f6af7eb7560727500d56
[SimplifyLibCalls][NFC] Clean up LibCallSimplifier from 'memset + malloc into calloc' transformation
foldMallocMemset can be safely removed because, since https://reviews.llvm.org/D103009,
this transformation is already performed in DSE.
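For context, here is a minimal C sketch of the source-level pattern behind the removed fold. It is not part of the patch, the helper name zeroed_buffer is invented for illustration, and it mirrors the pr25892_lite test updated below:

    #include <stdlib.h>
    #include <string.h>

    /* Minimal sketch (not from the patch; the helper name is made up).
       malloc(n) immediately zero-filled by memset is the pattern that
       SimplifyLibCalls used to rewrite to calloc(1, n) and that DSE now
       handles more generally. Assumes the allocation succeeds; a real
       caller would check for NULL before the memset. */
    void *zeroed_buffer(size_t n) {
      void *p = malloc(n);    /* allocate n uninitialized bytes          */
      return memset(p, 0, n); /* zero them; memset returns p, so this is */
                              /* the shape the fold turns into calloc    */
    }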
Differential Revision: https://reviews.llvm.org/D103451
-rw-r--r--  llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h |  2
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp        | 50
-rw-r--r--  llvm/test/Transforms/InstCombine/memset-1.ll          | 12
3 files changed, 8 insertions, 56 deletions
diff --git a/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h b/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h
index 8703434..a88e72f 100644
--- a/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h
+++ b/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h
@@ -132,8 +132,6 @@ private:
     eraseFromParent(I);
   }
 
-  Value *foldMallocMemset(CallInst *Memset, IRBuilderBase &B);
-
 public:
   LibCallSimplifier(
       const DataLayout &DL, const TargetLibraryInfo *TLI,
diff --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index b8e0f63..608b77c 100644
--- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -1156,59 +1156,12 @@ Value *LibCallSimplifier::optimizeMemMove(CallInst *CI, IRBuilderBase &B) {
   return CI->getArgOperand(0);
 }
 
-/// Fold memset[_chk](malloc(n), 0, n) --> calloc(1, n).
-Value *LibCallSimplifier::foldMallocMemset(CallInst *Memset, IRBuilderBase &B) {
-  // This has to be a memset of zeros (bzero).
-  auto *FillValue = dyn_cast<ConstantInt>(Memset->getArgOperand(1));
-  if (!FillValue || FillValue->getZExtValue() != 0)
-    return nullptr;
-
-  // TODO: We should handle the case where the malloc has more than one use.
-  // This is necessary to optimize common patterns such as when the result of
-  // the malloc is checked against null or when a memset intrinsic is used in
-  // place of a memset library call.
-  auto *Malloc = dyn_cast<CallInst>(Memset->getArgOperand(0));
-  if (!Malloc || !Malloc->hasOneUse())
-    return nullptr;
-
-  // Is the inner call really malloc()?
-  Function *InnerCallee = Malloc->getCalledFunction();
-  if (!InnerCallee)
-    return nullptr;
-
-  LibFunc Func;
-  if (!TLI->getLibFunc(*InnerCallee, Func) || !TLI->has(Func) ||
-      Func != LibFunc_malloc)
-    return nullptr;
-
-  // The memset must cover the same number of bytes that are malloc'd.
-  if (Memset->getArgOperand(2) != Malloc->getArgOperand(0))
-    return nullptr;
-
-  // Replace the malloc with a calloc. We need the data layout to know what the
-  // actual size of a 'size_t' parameter is.
-  B.SetInsertPoint(Malloc->getParent(), ++Malloc->getIterator());
-  const DataLayout &DL = Malloc->getModule()->getDataLayout();
-  IntegerType *SizeType = DL.getIntPtrType(B.GetInsertBlock()->getContext());
-  if (Value *Calloc = emitCalloc(ConstantInt::get(SizeType, 1),
-                                 Malloc->getArgOperand(0),
-                                 Malloc->getAttributes(), B, *TLI)) {
-    substituteInParent(Malloc, Calloc);
-    return Calloc;
-  }
-
-  return nullptr;
-}
-
 Value *LibCallSimplifier::optimizeMemSet(CallInst *CI, IRBuilderBase &B) {
   Value *Size = CI->getArgOperand(2);
   annotateNonNullAndDereferenceable(CI, 0, Size, DL);
   if (isa<IntrinsicInst>(CI))
     return nullptr;
 
-  if (auto *Calloc = foldMallocMemset(CI, B))
-    return Calloc;
-
   // memset(p, v, n) -> llvm.memset(align 1 p, v, n)
   Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(), false);
   CallInst *NewCI = B.CreateMemSet(CI->getArgOperand(0), Val, Size, Align(1));
@@ -3066,7 +3019,6 @@ Value *LibCallSimplifier::optimizeCall(CallInst *CI, IRBuilderBase &Builder) {
       return optimizeLog(CI, Builder);
     case Intrinsic::sqrt:
       return optimizeSqrt(CI, Builder);
-    // TODO: Use foldMallocMemset() with memset intrinsic.
     case Intrinsic::memset:
       return optimizeMemSet(CI, Builder);
     case Intrinsic::memcpy:
@@ -3289,8 +3241,6 @@ Value *FortifiedLibCallSimplifier::optimizeMemMoveChk(CallInst *CI,
 
 Value *FortifiedLibCallSimplifier::optimizeMemSetChk(CallInst *CI,
                                                      IRBuilderBase &B) {
-  // TODO: Try foldMallocMemset() here.
-
   if (isFortifiedCallFoldable(CI, 3, 2)) {
     Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(), false);
     CallInst *NewCI = B.CreateMemSet(CI->getArgOperand(0), Val,
diff --git a/llvm/test/Transforms/InstCombine/memset-1.ll b/llvm/test/Transforms/InstCombine/memset-1.ll
index 509cda4..3016081 100644
--- a/llvm/test/Transforms/InstCombine/memset-1.ll
+++ b/llvm/test/Transforms/InstCombine/memset-1.ll
@@ -21,17 +21,20 @@ define i8* @test_simplify1(i8* %mem, i32 %val, i32 %size) {
   ret i8* %ret
 }
 
+; Malloc + memset pattern is now handled by DSE in a more general way.
+
 define i8* @pr25892_lite(i32 %size) #0 {
 ; CHECK-LABEL: @pr25892_lite(
-; CHECK-NEXT:    [[CALLOC:%.*]] = call i8* @calloc(i32 1, i32 [[SIZE:%.*]])
-; CHECK-NEXT:    ret i8* [[CALLOC]]
+; CHECK-NEXT:    [[CALL:%.*]] = call i8* @malloc(i32 [[SIZE:%.*]])
+; CHECK-NEXT:    call void @llvm.memset.p0i8.i32(i8* align 1 [[CALL]], i8 0, i32 [[SIZE]], i1 false)
 ;
   %call1 = call i8* @malloc(i32 %size) #1
   %call2 = call i8* @memset(i8* %call1, i32 0, i32 %size) #1
   ret i8* %call2
 }
 
-; FIXME: A memset intrinsic should be handled similarly to a memset() libcall.
+; A memset intrinsic should be handled similarly to a memset() libcall.
+; Notice that malloc + memset pattern is now handled by DSE in a more general way.
 
 define i8* @malloc_and_memset_intrinsic(i32 %n) #0 {
 ; CHECK-LABEL: @malloc_and_memset_intrinsic(
@@ -45,6 +48,7 @@ define i8* @malloc_and_memset_intrinsic(i32 %n) #0 {
 }
 
 ; This should not create a calloc and should not crash the compiler.
+; Notice that malloc + memset pattern is now handled by DSE in a more general way.
 
 define i8* @notmalloc_memset(i32 %size, i8*(i32)* %notmalloc) {
 ; CHECK-LABEL: @notmalloc_memset(
@@ -57,8 +61,8 @@ define i8* @notmalloc_memset(i32 %size, i8*(i32)* %notmalloc) {
   ret i8* %call2
 }
 
-; FIXME: memset(malloc(x), 0, x) -> calloc(1, x)
 ; This doesn't fire currently because the malloc has more than one use.
+; Notice that malloc + memset pattern is now handled by DSE in a more general way.
 
 define float* @pr25892(i32 %size) #0 {
 ; CHECK-LABEL: @pr25892(