Diffstat (limited to 'llvm/lib/Transforms')
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp  10
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstructionCombining.cpp         5
-rw-r--r--  llvm/lib/Transforms/Scalar/SROA.cpp                              4
-rw-r--r--  llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp            3
4 files changed, 22 insertions, 0 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index e30e8d0..f62a8ca 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -768,6 +768,11 @@ static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI) {
// the knowledge that padding exists for the rest of the pipeline.
const DataLayout &DL = IC.getDataLayout();
auto *SL = DL.getStructLayout(ST);
+
+ // Don't unpack structures containing scalable vectors.
+ if (SL->getSizeInBits().isScalable())
+ return nullptr;
+
if (SL->hasPadding())
return nullptr;
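
With this guard, InstCombine leaves aggregate loads of scalable-vector structs alone instead of splitting them into per-field loads, since the field offsets are multiples of vscale rather than fixed byte counts. A minimal sketch of the kind of load now skipped (the %pair type and @f are illustrative, not from the patch; opaque-pointer syntax used for brevity):

  %pair = type { <vscale x 4 x i32>, <vscale x 4 x i32> }

  define <vscale x 4 x i32> @f(ptr %p) {
    %agg = load %pair, ptr %p               ; unpackLoadToAggregate now bails out here
    %v0 = extractvalue %pair %agg, 0
    ret <vscale x 4 x i32> %v0
  }
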
@@ -1291,6 +1296,11 @@ static bool unpackStoreToAggregate(InstCombinerImpl &IC, StoreInst &SI) {
// the knowledge that padding exists for the rest of the pipeline.
const DataLayout &DL = IC.getDataLayout();
auto *SL = DL.getStructLayout(ST);
+
+ // Don't unpack structures containing scalable vectors.
+ if (SL->getSizeInBits().isScalable())
+ return false;
+
if (SL->hasPadding())
return false;
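
The store path mirrors the load path: an aggregate store of a scalable-vector struct is no longer decomposed into per-field stores. A companion sketch using the same illustrative %pair type:

  define void @g(ptr %p, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
    %t0 = insertvalue %pair undef, <vscale x 4 x i32> %a, 0
    %t1 = insertvalue %pair %t0, <vscale x 4 x i32> %b, 1
    store %pair %t1, ptr %p                 ; unpackStoreToAggregate now bails out here
    ret void
  }
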
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 41cbcb1..b1f4e9f8 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2787,6 +2787,11 @@ Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) {
return R;
if (LoadInst *L = dyn_cast<LoadInst>(Agg)) {
+ // Bail out if the aggregate contains scalable vector types.
+ if (auto *STy = dyn_cast<StructType>(Agg->getType());
+ STy && STy->containsScalableVectorType())
+ return nullptr;
+
// If the (non-volatile) load only has one use, we can rewrite this to a
// load from a GEP. This reduces the size of the load. If a load is used
// only by extractvalue instructions then this either must have been
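
The transform guarded here rewrites an extractvalue of a load into a smaller load through a GEP to the selected field, which needs a constant byte offset from the struct layout; for a struct with scalable members that offset is vscale-dependent, so the rewrite must be skipped. A sketch of the fixed-size rewrite for contrast (types illustrative):

  ; before:
  %agg = load { i32, i64 }, ptr %p
  %v   = extractvalue { i32, i64 } %agg, 1
  ; after:
  %f1p = getelementptr { i32, i64 }, ptr %p, i64 0, i32 1
  %v   = load i64, ptr %f1p
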
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index cecd4ac..f6848d1 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -3977,6 +3977,10 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset,
return nullptr;
const StructLayout *SL = DL.getStructLayout(STy);
+
+ if (SL->getSizeInBits().isScalable())
+ return nullptr;
+
if (Offset >= SL->getSizeInBytes())
return nullptr;
uint64_t EndOffset = Offset + Size;
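
getTypePartition tries to carve the byte range [Offset, Offset + Size) of an alloca out as a standalone type, so comparisons like Offset >= SL->getSizeInBytes() only make sense for fixed-size layouts; a scalable size would otherwise assert when implicitly converted to a fixed integer. A rough sketch of an alloca that now takes the bail-out path (illustrative; assumes an alloca of a scalable struct reaches SROA):

  %pair = type { <vscale x 4 x i32>, <vscale x 4 x i32> }

  define void @h() {
    %a = alloca %pair                       ; SROA will not partition this by byte offset
    store %pair zeroinitializer, ptr %a
    ret void
  }
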
diff --git a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
index 14604df..003e7d3 100644
--- a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -522,6 +522,9 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
// the struct fields.
if (Ops.empty())
break;
+ assert(
+ !STy->containsScalableVectorType() &&
+ "GEPs are not supported on structures containing scalable vectors");
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
if (SE.getTypeSizeInBits(C->getType()) <= 64) {
const StructLayout &SL = *DL.getStructLayout(STy);
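
Unlike the other changes, this one is an assert rather than a bail-out: getelementptr cannot index into a struct containing scalable vector types (the field offsets are vscale-dependent), so by the time expandAddToGEP decomposes an offset against a StructLayout such a type should never appear. For illustration only, IR of the shape the assert rules out (rejected by the IR verifier; %pair as above):

  %f1 = getelementptr %pair, ptr %p, i64 0, i32 1   ; invalid: struct contains scalable vectors
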