aboutsummaryrefslogtreecommitdiff
path: root/llvm/lib/Analysis/ConstantFolding.cpp
diff options
context:
space:
mode:
authorBjörn Pettersson <bjorn.a.pettersson@ericsson.com>2024-02-15 15:40:21 +0100
committerGitHub <noreply@github.com>2024-02-15 15:40:21 +0100
commit7677453886a665b37f22c77434b36a680aba6ebb (patch)
treecc16b57d67ab87d6551c9b9be502515068c5f104 /llvm/lib/Analysis/ConstantFolding.cpp
parent84165864d458edce750f3a10d5dbd348970893b6 (diff)
downloadllvm-7677453886a665b37f22c77434b36a680aba6ebb.zip
llvm-7677453886a665b37f22c77434b36a680aba6ebb.tar.gz
llvm-7677453886a665b37f22c77434b36a680aba6ebb.tar.bz2
[ConstantFolding] Do not consider padded-in-memory types as uniform (#81854)
Teaching ConstantFoldLoadFromUniformValue that types that are padded in memory can't be considered as uniform. Using the big hammer to prevent optimizations when loading from a constant for which DataLayout::typeSizeEqualsStoreSize would return false. Main problem solved would be something like this: store i17 -1, ptr %p, align 4 %v = load i8, ptr %p, align 1 If for example the i17 occupies 32 bits in memory, then LLVM IR doesn't really tell where the padding goes. And even if we assume that the 15 most significant bits are padding, then they should be considered as undefined (even if LLVM backend typically would pad with zeroes). Anyway, for a big-endian target the load would read those most significant bits, which aren't guaranteed to be one's. So it would be wrong to constant fold the load as returning -1. If LLVM IR had been more explicit about the placement of padding, then we could allow the constant fold of the load in the example, but only for little-endian. Fixes: https://github.com/llvm/llvm-project/issues/81793
Diffstat (limited to 'llvm/lib/Analysis/ConstantFolding.cpp')
-rw-r--r--llvm/lib/Analysis/ConstantFolding.cpp17
1 file changed, 11 insertions, 6 deletions
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 90da339..8b7031e 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -106,7 +106,7 @@ Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
"Invalid constantexpr bitcast!");
// Catch the obvious splat cases.
- if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy))
+ if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy, DL))
return Res;
if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
@@ -342,7 +342,7 @@ bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
}
Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
- const DataLayout &DL) {
+ const DataLayout &DL) {
do {
Type *SrcTy = C->getType();
if (SrcTy == DestTy)
@@ -355,7 +355,7 @@ Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
// Catch the obvious splat cases (since all-zeros can coerce non-integral
// pointers legally).
- if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy))
+ if (Constant *Res = ConstantFoldLoadFromUniformValue(C, DestTy, DL))
return Res;
// If the type sizes are the same and a cast is legal, just directly
@@ -709,7 +709,7 @@ Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
return PoisonValue::get(Ty);
// Try an offset-independent fold of a uniform value.
- if (Constant *Result = ConstantFoldLoadFromUniformValue(C, Ty))
+ if (Constant *Result = ConstantFoldLoadFromUniformValue(C, Ty, DL))
return Result;
// Try hard to fold loads from bitcasted strange and non-type-safe things.
@@ -745,7 +745,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
// If this load comes from anywhere in a uniform constant global, the value
// is always the same, regardless of the loaded offset.
- return ConstantFoldLoadFromUniformValue(GV->getInitializer(), Ty);
+ return ConstantFoldLoadFromUniformValue(GV->getInitializer(), Ty, DL);
}
Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
@@ -754,11 +754,16 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
return ConstantFoldLoadFromConstPtr(C, Ty, Offset, DL);
}
-Constant *llvm::ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty) {
+Constant *llvm::ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty,
+ const DataLayout &DL) {
if (isa<PoisonValue>(C))
return PoisonValue::get(Ty);
if (isa<UndefValue>(C))
return UndefValue::get(Ty);
+ // If padding is needed when storing C to memory, then it isn't considered as
+ // uniform.
+ if (!DL.typeSizeEqualsStoreSize(C->getType()))
+ return nullptr;
if (C->isNullValue() && !Ty->isX86_MMXTy() && !Ty->isX86_AMXTy())
return Constant::getNullValue(Ty);
if (C->isAllOnesValue() &&