path: root/llvm/lib/IR/ConstantFold.cpp
author     Nikita Popov <npopov@redhat.com>    2024-05-31 10:37:32 +0200
committer  Nikita Popov <npopov@redhat.com>    2024-05-31 10:37:32 +0200
commit     51e459a561519c8d51e2b4cadddc0d1f99c8b7ef
tree       e07cb60ffab25ba3b183ff9a62be994a2f76118a /llvm/lib/IR/ConstantFold.cpp
parent     b86a9c5bf2fab0408a3d549995d6e2449f71a16d
Revert "[ConstantFold] Remove non-trivial gep-of-gep fold (#93823)"
This reverts commit e1cc9e4eaddcc295b4e775512e33b947b1514c17.

This causes some non-trivial text size increases in unoptimized builds for Bullet. Revert while I investigate.
Diffstat (limited to 'llvm/lib/IR/ConstantFold.cpp')
-rw-r--r--  llvm/lib/IR/ConstantFold.cpp | 56
1 file changed, 52 insertions, 4 deletions
diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp
index bac6be8..0ef92ea 100644
--- a/llvm/lib/IR/ConstantFold.cpp
+++ b/llvm/lib/IR/ConstantFold.cpp
@@ -1432,16 +1432,64 @@ static Constant *foldGEPOfGEP(GEPOperator *GEP, Type *PointeeTy, bool InBounds,
   if (GEP->getInRange())
     return nullptr;
 
-  // Only handle simple case with leading zero index. We cannot perform an
-  // actual addition as we don't know the correct index type size to use.
   Constant *Idx0 = cast<Constant>(Idxs[0]);
-  if (!Idx0->isNullValue())
+  if (Idx0->isNullValue()) {
+    // Handle the simple case of a zero index.
+    SmallVector<Value*, 16> NewIndices;
+    NewIndices.reserve(Idxs.size() + GEP->getNumIndices());
+    NewIndices.append(GEP->idx_begin(), GEP->idx_end());
+    NewIndices.append(Idxs.begin() + 1, Idxs.end());
+    return ConstantExpr::getGetElementPtr(
+        GEP->getSourceElementType(), cast<Constant>(GEP->getPointerOperand()),
+        NewIndices, InBounds && GEP->isInBounds());
+  }
+
+  gep_type_iterator LastI = gep_type_end(GEP);
+  for (gep_type_iterator I = gep_type_begin(GEP), E = gep_type_end(GEP);
+       I != E; ++I)
+    LastI = I;
+
+  // We can't combine GEPs if the last index is a struct type.
+  if (!LastI.isSequential())
+    return nullptr;
+  // We could perform the transform with non-constant index, but prefer leaving
+  // it as GEP of GEP rather than GEP of add for now.
+  ConstantInt *CI = dyn_cast<ConstantInt>(Idx0);
+  if (!CI)
+    return nullptr;
+
+  // TODO: This code may be extended to handle vectors as well.
+  auto *LastIdx = cast<Constant>(GEP->getOperand(GEP->getNumOperands()-1));
+  Type *LastIdxTy = LastIdx->getType();
+  if (LastIdxTy->isVectorTy())
     return nullptr;
 
   SmallVector<Value*, 16> NewIndices;
   NewIndices.reserve(Idxs.size() + GEP->getNumIndices());
-  NewIndices.append(GEP->idx_begin(), GEP->idx_end());
+  NewIndices.append(GEP->idx_begin(), GEP->idx_end() - 1);
+
+  // Add the last index of the source with the first index of the new GEP.
+  // Make sure to handle the case when they are actually different types.
+  if (LastIdxTy != Idx0->getType()) {
+    unsigned CommonExtendedWidth =
+        std::max(LastIdxTy->getIntegerBitWidth(),
+                 Idx0->getType()->getIntegerBitWidth());
+    CommonExtendedWidth = std::max(CommonExtendedWidth, 64U);
+
+    Type *CommonTy =
+        Type::getIntNTy(LastIdxTy->getContext(), CommonExtendedWidth);
+    if (Idx0->getType() != CommonTy)
+      Idx0 = ConstantFoldCastInstruction(Instruction::SExt, Idx0, CommonTy);
+    if (LastIdx->getType() != CommonTy)
+      LastIdx =
+          ConstantFoldCastInstruction(Instruction::SExt, LastIdx, CommonTy);
+    if (!Idx0 || !LastIdx)
+      return nullptr;
+  }
+
+  NewIndices.push_back(ConstantExpr::get(Instruction::Add, Idx0, LastIdx));
   NewIndices.append(Idxs.begin() + 1, Idxs.end());
+
   return ConstantExpr::getGetElementPtr(
       GEP->getSourceElementType(), cast<Constant>(GEP->getPointerOperand()),
       NewIndices, InBounds && GEP->isInBounds());
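
For context, a rough sketch of what the restored fold does at the IR level. This example is not taken from the patch; the global @g and the array types below are made up for illustration:

; Hypothetical input, for illustration only.
@g = global [8 x [4 x i32]] zeroinitializer

; Simple case (kept even without this revert): the outer GEP has a leading
; zero index, so the two index lists are concatenated with the zero dropped.
;   getelementptr ([4 x i32], ptr getelementptr ([8 x [4 x i32]], ptr @g, i64 0, i64 2), i64 0, i64 3)
; becomes
;   getelementptr ([8 x [4 x i32]], ptr @g, i64 0, i64 2, i64 3)

; Non-trivial case (restored by this revert): the outer GEP has a non-zero
; constant leading index, and the inner GEP's last index steps over a
; sequential type ([4 x i32]), so the two indices can be added (2 + 1 = 3).
;   getelementptr ([4 x i32], ptr getelementptr ([8 x [4 x i32]], ptr @g, i64 0, i64 2), i64 1, i64 3)
; becomes
;   getelementptr ([8 x [4 x i32]], ptr @g, i64 0, i64 3, i64 3)
; Both forms address byte offset 60 from @g. If the two index widths differ,
; both indices are sign-extended to a common width of at least 64 bits before the add.

Cases where the inner GEP's last index is into a struct, or where it is a vector of indices, are rejected with nullptr, matching the LastI.isSequential() and LastIdxTy->isVectorTy() checks above.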