Diffstat (limited to 'clang/lib/CodeGen')
-rw-r--r--  clang/lib/CodeGen/CGExpr.cpp          100
-rw-r--r--  clang/lib/CodeGen/CGExprAgg.cpp       146
-rw-r--r--  clang/lib/CodeGen/CGExprScalar.cpp     51
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntime.cpp  31
-rw-r--r--  clang/lib/CodeGen/CodeGenFunction.h     6
-rw-r--r--  clang/lib/CodeGen/CodeGenModule.cpp    13
6 files changed, 172 insertions(+), 175 deletions(-)
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index e6e4947..9f30287 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -6784,29 +6784,26 @@ LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
}
-void CodeGenFunction::FlattenAccessAndType(
- Address Addr, QualType AddrType,
- SmallVectorImpl<std::pair<Address, llvm::Value *>> &AccessList,
- SmallVectorImpl<QualType> &FlatTypes) {
- // WorkList is list of type we are processing + the Index List to access
- // the field of that type in Addr for use in a GEP
- llvm::SmallVector<std::pair<QualType, llvm::SmallVector<llvm::Value *, 4>>,
- 16>
+void CodeGenFunction::FlattenAccessAndTypeLValue(
+ LValue Val, SmallVectorImpl<LValue> &AccessList) {
+
+ llvm::SmallVector<
+ std::tuple<LValue, QualType, llvm::SmallVector<llvm::Value *, 4>>, 16>
WorkList;
llvm::IntegerType *IdxTy = llvm::IntegerType::get(getLLVMContext(), 32);
- // Addr should be a pointer so we need to 'dereference' it
- WorkList.push_back({AddrType, {llvm::ConstantInt::get(IdxTy, 0)}});
+ WorkList.push_back({Val, Val.getType(), {llvm::ConstantInt::get(IdxTy, 0)}});
while (!WorkList.empty()) {
- auto [T, IdxList] = WorkList.pop_back_val();
+ auto [LVal, T, IdxList] = WorkList.pop_back_val();
T = T.getCanonicalType().getUnqualifiedType();
assert(!isa<MatrixType>(T) && "Matrix types not yet supported in HLSL");
+
if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) {
uint64_t Size = CAT->getZExtSize();
for (int64_t I = Size - 1; I > -1; I--) {
llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
IdxListCopy.push_back(llvm::ConstantInt::get(IdxTy, I));
- WorkList.emplace_back(CAT->getElementType(), IdxListCopy);
+ WorkList.emplace_back(LVal, CAT->getElementType(), IdxListCopy);
}
} else if (const auto *RT = dyn_cast<RecordType>(T)) {
const RecordDecl *Record = RT->getOriginalDecl()->getDefinitionOrSelf();
@@ -6814,44 +6811,75 @@ void CodeGenFunction::FlattenAccessAndType(
const CXXRecordDecl *CXXD = dyn_cast<CXXRecordDecl>(Record);
- llvm::SmallVector<QualType, 16> FieldTypes;
+ llvm::SmallVector<
+ std::tuple<LValue, QualType, llvm::SmallVector<llvm::Value *, 4>>, 16>
+ ReverseList;
if (CXXD && CXXD->isStandardLayout())
Record = CXXD->getStandardLayoutBaseWithFields();
// deal with potential base classes
if (CXXD && !CXXD->isStandardLayout()) {
- for (auto &Base : CXXD->bases())
- FieldTypes.push_back(Base.getType());
+ if (CXXD->getNumBases() > 0) {
+ assert(CXXD->getNumBases() == 1 &&
+ "HLSL doesn't support multiple inheritance.");
+ auto Base = CXXD->bases_begin();
+ llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
+ IdxListCopy.push_back(llvm::ConstantInt::get(
+ IdxTy, 0)); // base struct should be at index zero
+ ReverseList.emplace_back(LVal, Base->getType(), IdxListCopy);
+ }
}
- for (auto *FD : Record->fields())
- FieldTypes.push_back(FD->getType());
+ const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(Record);
- for (int64_t I = FieldTypes.size() - 1; I > -1; I--) {
- llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
- IdxListCopy.push_back(llvm::ConstantInt::get(IdxTy, I));
- WorkList.insert(WorkList.end(), {FieldTypes[I], IdxListCopy});
+ llvm::Type *LLVMT = ConvertTypeForMem(T);
+ CharUnits Align = getContext().getTypeAlignInChars(T);
+ LValue RLValue;
+ bool createdGEP = false;
+ for (auto *FD : Record->fields()) {
+ if (FD->isBitField()) {
+ if (FD->isUnnamedBitField())
+ continue;
+ if (!createdGEP) {
+ createdGEP = true;
+ Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList,
+ LLVMT, Align, "gep");
+ RLValue = MakeAddrLValue(GEP, T);
+ }
+ LValue FieldLVal = EmitLValueForField(RLValue, FD, true);
+ ReverseList.push_back({FieldLVal, FD->getType(), {}});
+ } else {
+ llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
+ IdxListCopy.push_back(
+ llvm::ConstantInt::get(IdxTy, Layout.getLLVMFieldNo(FD)));
+ ReverseList.emplace_back(LVal, FD->getType(), IdxListCopy);
+ }
}
+
+ std::reverse(ReverseList.begin(), ReverseList.end());
+ llvm::append_range(WorkList, ReverseList);
} else if (const auto *VT = dyn_cast<VectorType>(T)) {
llvm::Type *LLVMT = ConvertTypeForMem(T);
CharUnits Align = getContext().getTypeAlignInChars(T);
- Address GEP =
- Builder.CreateInBoundsGEP(Addr, IdxList, LLVMT, Align, "vector.gep");
+ Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList, LLVMT,
+ Align, "vector.gep");
+ LValue Base = MakeAddrLValue(GEP, T);
for (unsigned I = 0, E = VT->getNumElements(); I < E; I++) {
- llvm::Value *Idx = llvm::ConstantInt::get(IdxTy, I);
- // gep on vector fields is not recommended so combine gep with
- // extract/insert
- AccessList.emplace_back(GEP, Idx);
- FlatTypes.push_back(VT->getElementType());
+ llvm::Constant *Idx = llvm::ConstantInt::get(IdxTy, I);
+ LValue LV =
+ LValue::MakeVectorElt(Base.getAddress(), Idx, VT->getElementType(),
+ Base.getBaseInfo(), TBAAAccessInfo());
+ AccessList.emplace_back(LV);
}
- } else {
- // a scalar/builtin type
- llvm::Type *LLVMT = ConvertTypeForMem(T);
- CharUnits Align = getContext().getTypeAlignInChars(T);
- Address GEP =
- Builder.CreateInBoundsGEP(Addr, IdxList, LLVMT, Align, "gep");
- AccessList.emplace_back(GEP, nullptr);
- FlatTypes.push_back(T);
+ } else { // a scalar/builtin type
+ if (!IdxList.empty()) {
+ llvm::Type *LLVMT = ConvertTypeForMem(T);
+ CharUnits Align = getContext().getTypeAlignInChars(T);
+ Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList,
+ LLVMT, Align, "gep");
+ AccessList.emplace_back(MakeAddrLValue(GEP, T));
+      } else // must be a bitfield; we already created an LValue for it
+ AccessList.emplace_back(LVal);
}
}
}
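
The rewritten flattening above is, at its core, a depth-first traversal with an explicit worklist: constant arrays and record fields extend a GEP index path, vector lanes become VectorElt LValues, and scalars (or pre-built bitfield LValues) terminate a path. Below is a minimal, self-contained C++ sketch of that traversal over a toy type tree; Ty, flatten, and the index-path printing are invented for illustration and are not clang API.

#include <cstdio>
#include <string>
#include <utility>
#include <vector>

// Toy stand-in for the type tree: a scalar, a fixed-size array, a record with
// fields, or a vector.
struct Ty {
  enum Kind { Scalar, Array, Record, Vector } K = Scalar;
  std::string Name;      // scalar type name, used for printing
  std::vector<Ty> Elems; // record fields, or the lone array/vector element type
  unsigned Count = 0;    // array size or vector lane count
};

// Depth-first flattening with an explicit worklist, mirroring
// FlattenAccessAndTypeLValue: every scalar leaf ends up paired with the index
// path a GEP would take from the base pointer.
static void flatten(const Ty &Root,
                    std::vector<std::pair<std::string, std::string>> &Out) {
  std::vector<std::pair<const Ty *, std::vector<unsigned>>> WorkList;
  WorkList.push_back({&Root, {0}}); // leading 0 "dereferences" the base pointer
  while (!WorkList.empty()) {
    auto [T, Idx] = WorkList.back();
    WorkList.pop_back();
    switch (T->K) {
    case Ty::Array:
    case Ty::Vector: // the real code emits vector lanes as extract/insert, not GEPs
      for (unsigned I = T->Count; I-- > 0;) { // reversed so element 0 pops first
        std::vector<unsigned> Copy = Idx;
        Copy.push_back(I);
        WorkList.push_back({&T->Elems[0], Copy});
      }
      break;
    case Ty::Record:
      for (unsigned I = T->Elems.size(); I-- > 0;) {
        std::vector<unsigned> Copy = Idx;
        Copy.push_back(I); // stands in for Layout.getLLVMFieldNo(FD)
        WorkList.push_back({&T->Elems[I], Copy});
      }
      break;
    case Ty::Scalar: {
      std::string Path;
      for (unsigned I : Idx)
        Path += "[" + std::to_string(I) + "]";
      Out.push_back({T->Name, Path});
      break;
    }
    }
  }
}

int main() {
  // Model of struct { float V[2]; int A[2]; }.
  Ty F{Ty::Scalar, "float"}, I32{Ty::Scalar, "int"};
  Ty Vec{Ty::Vector, "", {F}, 2};
  Ty Arr{Ty::Array, "", {I32}, 2};
  Ty Rec{Ty::Record, "", {Vec, Arr}};
  std::vector<std::pair<std::string, std::string>> Out;
  flatten(Rec, Out);
  for (auto &[Name, Path] : Out) // float [0][0][0], float [0][0][1],
    std::printf("%s %s\n", Name.c_str(), Path.c_str()); // int [0][1][0], ...
}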
diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index b8150a2..07b9aeb 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -488,100 +488,62 @@ static bool isTrivialFiller(Expr *E) {
return false;
}
-static void EmitHLSLAggregateSplatCast(CodeGenFunction &CGF, Address DestVal,
- QualType DestTy, llvm::Value *SrcVal,
- QualType SrcTy, SourceLocation Loc) {
+// Emit an elementwise cast where the RHS is a scalar or vector, or emit an
+// aggregate splat cast.
+static void EmitHLSLScalarElementwiseAndSplatCasts(CodeGenFunction &CGF,
+ LValue DestVal,
+ llvm::Value *SrcVal,
+ QualType SrcTy,
+ SourceLocation Loc) {
// Flatten our destination
- SmallVector<QualType> DestTypes; // Flattened type
- SmallVector<std::pair<Address, llvm::Value *>, 16> StoreGEPList;
- // ^^ Flattened accesses to DestVal we want to store into
- CGF.FlattenAccessAndType(DestVal, DestTy, StoreGEPList, DestTypes);
-
- assert(SrcTy->isScalarType() && "Invalid HLSL Aggregate splat cast.");
- for (unsigned I = 0, Size = StoreGEPList.size(); I < Size; ++I) {
- llvm::Value *Cast =
- CGF.EmitScalarConversion(SrcVal, SrcTy, DestTypes[I], Loc);
-
- // store back
- llvm::Value *Idx = StoreGEPList[I].second;
- if (Idx) {
- llvm::Value *V =
- CGF.Builder.CreateLoad(StoreGEPList[I].first, "load.for.insert");
- Cast = CGF.Builder.CreateInsertElement(V, Cast, Idx);
- }
- CGF.Builder.CreateStore(Cast, StoreGEPList[I].first);
- }
-}
-
-// emit a flat cast where the RHS is a scalar, including vector
-static void EmitHLSLScalarFlatCast(CodeGenFunction &CGF, Address DestVal,
- QualType DestTy, llvm::Value *SrcVal,
- QualType SrcTy, SourceLocation Loc) {
- // Flatten our destination
- SmallVector<QualType, 16> DestTypes; // Flattened type
- SmallVector<std::pair<Address, llvm::Value *>, 16> StoreGEPList;
- // ^^ Flattened accesses to DestVal we want to store into
- CGF.FlattenAccessAndType(DestVal, DestTy, StoreGEPList, DestTypes);
-
- assert(SrcTy->isVectorType() && "HLSL Flat cast doesn't handle splatting.");
- const VectorType *VT = SrcTy->getAs<VectorType>();
- SrcTy = VT->getElementType();
- assert(StoreGEPList.size() <= VT->getNumElements() &&
- "Cannot perform HLSL flat cast when vector source \
- object has less elements than flattened destination \
- object.");
- for (unsigned I = 0, Size = StoreGEPList.size(); I < Size; I++) {
- llvm::Value *Load = CGF.Builder.CreateExtractElement(SrcVal, I, "vec.load");
+ SmallVector<LValue, 16> StoreList;
+ CGF.FlattenAccessAndTypeLValue(DestVal, StoreList);
+
+ bool isVector = false;
+ if (auto *VT = SrcTy->getAs<VectorType>()) {
+ isVector = true;
+ SrcTy = VT->getElementType();
+    assert(StoreList.size() <= VT->getNumElements() &&
+           "Cannot perform HLSL elementwise cast when the vector source has "
+           "fewer elements than the flattened destination object.");
+ }
+
+ for (unsigned I = 0, Size = StoreList.size(); I < Size; I++) {
+ LValue DestLVal = StoreList[I];
+ llvm::Value *Load =
+ isVector ? CGF.Builder.CreateExtractElement(SrcVal, I, "vec.load")
+ : SrcVal;
llvm::Value *Cast =
- CGF.EmitScalarConversion(Load, SrcTy, DestTypes[I], Loc);
-
- // store back
- llvm::Value *Idx = StoreGEPList[I].second;
- if (Idx) {
- llvm::Value *V =
- CGF.Builder.CreateLoad(StoreGEPList[I].first, "load.for.insert");
- Cast = CGF.Builder.CreateInsertElement(V, Cast, Idx);
- }
- CGF.Builder.CreateStore(Cast, StoreGEPList[I].first);
+ CGF.EmitScalarConversion(Load, SrcTy, DestLVal.getType(), Loc);
+ CGF.EmitStoreThroughLValue(RValue::get(Cast), DestLVal);
}
}
// emit a flat cast where the RHS is an aggregate
-static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, Address DestVal,
- QualType DestTy, Address SrcVal,
- QualType SrcTy, SourceLocation Loc) {
+static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, LValue DestVal,
+ LValue SrcVal, SourceLocation Loc) {
// Flatten our destination
- SmallVector<QualType, 16> DestTypes; // Flattened type
- SmallVector<std::pair<Address, llvm::Value *>, 16> StoreGEPList;
- // ^^ Flattened accesses to DestVal we want to store into
- CGF.FlattenAccessAndType(DestVal, DestTy, StoreGEPList, DestTypes);
+ SmallVector<LValue, 16> StoreList;
+ CGF.FlattenAccessAndTypeLValue(DestVal, StoreList);
// Flatten our src
- SmallVector<QualType, 16> SrcTypes; // Flattened type
- SmallVector<std::pair<Address, llvm::Value *>, 16> LoadGEPList;
- // ^^ Flattened accesses to SrcVal we want to load from
- CGF.FlattenAccessAndType(SrcVal, SrcTy, LoadGEPList, SrcTypes);
+ SmallVector<LValue, 16> LoadList;
+ CGF.FlattenAccessAndTypeLValue(SrcVal, LoadList);
-  assert(StoreGEPList.size() <= LoadGEPList.size() &&
-         "Cannot perform HLSL flat cast when flattened source object \
-has less elements than flattened destination object.");
+  assert(StoreList.size() <= LoadList.size() &&
+         "Cannot perform HLSL elementwise cast when the flattened source "
+         "object has fewer elements than the flattened destination object.");
- // apply casts to what we load from LoadGEPList
+ // apply casts to what we load from LoadList
// and store result in Dest
- for (unsigned I = 0, E = StoreGEPList.size(); I < E; I++) {
- llvm::Value *Idx = LoadGEPList[I].second;
- llvm::Value *Load = CGF.Builder.CreateLoad(LoadGEPList[I].first, "load");
- Load =
- Idx ? CGF.Builder.CreateExtractElement(Load, Idx, "vec.extract") : Load;
- llvm::Value *Cast =
- CGF.EmitScalarConversion(Load, SrcTypes[I], DestTypes[I], Loc);
-
- // store back
- Idx = StoreGEPList[I].second;
- if (Idx) {
- llvm::Value *V =
- CGF.Builder.CreateLoad(StoreGEPList[I].first, "load.for.insert");
- Cast = CGF.Builder.CreateInsertElement(V, Cast, Idx);
- }
- CGF.Builder.CreateStore(Cast, StoreGEPList[I].first);
+ for (unsigned I = 0, E = StoreList.size(); I < E; I++) {
+ LValue DestLVal = StoreList[I];
+ LValue SrcLVal = LoadList[I];
+ RValue RVal = CGF.EmitLoadOfLValue(SrcLVal, Loc);
+ assert(RVal.isScalar() && "All flattened source values should be scalars");
+ llvm::Value *Val = RVal.getScalarVal();
+ llvm::Value *Cast = CGF.EmitScalarConversion(Val, SrcLVal.getType(),
+ DestLVal.getType(), Loc);
+ CGF.EmitStoreThroughLValue(RValue::get(Cast), DestLVal);
}
}
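
With the destination flattened to LValues, the two former helpers collapse into one loop whose only variation is where each scalar comes from: the splat path reuses a single source scalar, the elementwise path extracts one source lane per destination leaf. Below is a runnable model of that control flow, assuming doubles stand in for the destination LValues and static_cast for EmitScalarConversion; splatOrElementwise is an invented name.

#include <cassert>
#include <cstdio>
#include <vector>

// Model of the unified store loop: DestSlots stands in for the flattened
// destination LValues.
static void splatOrElementwise(std::vector<double> &DestSlots,
                               const std::vector<float> &Src, bool IsVector) {
  if (IsVector)
    assert(Src.size() >= DestSlots.size() &&
           "vector source needs at least as many elements as the "
           "flattened destination");
  for (size_t I = 0; I < DestSlots.size(); ++I) {
    // The splat path reuses the single scalar; the elementwise path walks
    // the source lanes in order.
    float Load = IsVector ? Src[I] : Src[0];
    DestSlots[I] = static_cast<double>(Load); // one conversion per leaf
  }
}

int main() {
  std::vector<double> Dest(3);
  splatOrElementwise(Dest, {1.5f}, /*IsVector=*/false);         // 1.5 1.5 1.5
  splatOrElementwise(Dest, {1.f, 2.f, 3.f}, /*IsVector=*/true); // 1 2 3
  std::printf("%g %g %g\n", Dest[0], Dest[1], Dest[2]);
}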
@@ -988,31 +950,33 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
Expr *Src = E->getSubExpr();
QualType SrcTy = Src->getType();
RValue RV = CGF.EmitAnyExpr(Src);
- QualType DestTy = E->getType();
- Address DestVal = Dest.getAddress();
+ LValue DestLVal = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
SourceLocation Loc = E->getExprLoc();
- assert(RV.isScalar() && "RHS of HLSL splat cast must be a scalar.");
+ assert(RV.isScalar() && SrcTy->isScalarType() &&
+ "RHS of HLSL splat cast must be a scalar.");
llvm::Value *SrcVal = RV.getScalarVal();
- EmitHLSLAggregateSplatCast(CGF, DestVal, DestTy, SrcVal, SrcTy, Loc);
+ EmitHLSLScalarElementwiseAndSplatCasts(CGF, DestLVal, SrcVal, SrcTy, Loc);
break;
}
case CK_HLSLElementwiseCast: {
Expr *Src = E->getSubExpr();
QualType SrcTy = Src->getType();
RValue RV = CGF.EmitAnyExpr(Src);
- QualType DestTy = E->getType();
- Address DestVal = Dest.getAddress();
+ LValue DestLVal = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
SourceLocation Loc = E->getExprLoc();
if (RV.isScalar()) {
llvm::Value *SrcVal = RV.getScalarVal();
- EmitHLSLScalarFlatCast(CGF, DestVal, DestTy, SrcVal, SrcTy, Loc);
+ assert(SrcTy->isVectorType() &&
+ "HLSL Elementwise cast doesn't handle splatting.");
+ EmitHLSLScalarElementwiseAndSplatCasts(CGF, DestLVal, SrcVal, SrcTy, Loc);
} else {
assert(RV.isAggregate() &&
"Can't perform HLSL Aggregate cast on a complex type.");
Address SrcVal = RV.getAggregateAddress();
- EmitHLSLElementwiseCast(CGF, DestVal, DestTy, SrcVal, SrcTy, Loc);
+ EmitHLSLElementwiseCast(CGF, DestLVal, CGF.MakeAddrLValue(SrcVal, SrcTy),
+ Loc);
}
break;
}
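
Taken together, the dispatch above routes casts by the shape of the evaluated source: splat casts always carry a scalar, elementwise casts carry a vector or an aggregate. Here is a hedged sketch of that decision table; the enum, flags, and the HLSL snippets in the comments are illustrative stand-ins rather than the clang entities.

#include <cassert>

enum class CastKind { SplatCast, ElementwiseCast }; // stand-ins for CK_HLSL*

// A splat cast always evaluates to a single scalar, while an elementwise cast
// arrives either as a vector (still a scalar RValue in CodeGen terms) or as
// an aggregate with its own address.
void dispatch(CastKind CK, bool RVIsScalar, bool SrcIsVector) {
  switch (CK) {
  case CastKind::SplatCast:
    assert(RVIsScalar && !SrcIsVector); // e.g. (MyAgg)1.0 in HLSL
    // -> EmitHLSLScalarElementwiseAndSplatCasts, splat path
    break;
  case CastKind::ElementwiseCast:
    if (RVIsScalar) {
      assert(SrcIsVector); // e.g. (MyAgg)someFloat4; splatting is rejected
      // -> EmitHLSLScalarElementwiseAndSplatCasts, per-lane path
    }
    // else: aggregate-to-aggregate over two flattened LValue lists
    // -> EmitHLSLElementwiseCast
    break;
  }
}

int main() {
  dispatch(CastKind::SplatCast, /*RVIsScalar=*/true, /*SrcIsVector=*/false);
  dispatch(CastKind::ElementwiseCast, /*RVIsScalar=*/true, /*SrcIsVector=*/true);
  dispatch(CastKind::ElementwiseCast, /*RVIsScalar=*/false, /*SrcIsVector=*/false);
}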
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index c961222..06d9d81 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -2397,39 +2397,37 @@ bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
}
// RHS is an aggregate type
-static Value *EmitHLSLElementwiseCast(CodeGenFunction &CGF, Address RHSVal,
- QualType RHSTy, QualType LHSTy,
- SourceLocation Loc) {
- SmallVector<std::pair<Address, llvm::Value *>, 16> LoadGEPList;
- SmallVector<QualType, 16> SrcTypes; // Flattened type
- CGF.FlattenAccessAndType(RHSVal, RHSTy, LoadGEPList, SrcTypes);
- // LHS is either a vector or a builtin?
+static Value *EmitHLSLElementwiseCast(CodeGenFunction &CGF, LValue SrcVal,
+ QualType DestTy, SourceLocation Loc) {
+ SmallVector<LValue, 16> LoadList;
+ CGF.FlattenAccessAndTypeLValue(SrcVal, LoadList);
+  // Dest is either a vector or a builtin type.
   // if it's a vector, create a temp alloca to store into and return that
- if (auto *VecTy = LHSTy->getAs<VectorType>()) {
- assert(SrcTypes.size() >= VecTy->getNumElements() &&
- "Flattened type on RHS must have more elements than vector on LHS.");
+ if (auto *VecTy = DestTy->getAs<VectorType>()) {
+    assert(LoadList.size() >= VecTy->getNumElements() &&
+           "Flattened type on RHS must have at least as many elements as "
+           "the vector on LHS.");
llvm::Value *V =
- CGF.Builder.CreateLoad(CGF.CreateIRTemp(LHSTy, "flatcast.tmp"));
+ CGF.Builder.CreateLoad(CGF.CreateIRTemp(DestTy, "flatcast.tmp"));
// write to V.
for (unsigned I = 0, E = VecTy->getNumElements(); I < E; I++) {
- llvm::Value *Load = CGF.Builder.CreateLoad(LoadGEPList[I].first, "load");
- llvm::Value *Idx = LoadGEPList[I].second;
- Load = Idx ? CGF.Builder.CreateExtractElement(Load, Idx, "vec.extract")
- : Load;
- llvm::Value *Cast = CGF.EmitScalarConversion(
- Load, SrcTypes[I], VecTy->getElementType(), Loc);
+ RValue RVal = CGF.EmitLoadOfLValue(LoadList[I], Loc);
+ assert(RVal.isScalar() &&
+ "All flattened source values should be scalars.");
+ llvm::Value *Cast =
+ CGF.EmitScalarConversion(RVal.getScalarVal(), LoadList[I].getType(),
+ VecTy->getElementType(), Loc);
V = CGF.Builder.CreateInsertElement(V, Cast, I);
}
return V;
}
- // i its a builtin just do an extract element or load.
- assert(LHSTy->isBuiltinType() &&
+  // if it's a builtin, just do an extract element or load.
+ assert(DestTy->isBuiltinType() &&
"Destination type must be a vector or builtin type.");
- llvm::Value *Load = CGF.Builder.CreateLoad(LoadGEPList[0].first, "load");
- llvm::Value *Idx = LoadGEPList[0].second;
- Load =
- Idx ? CGF.Builder.CreateExtractElement(Load, Idx, "vec.extract") : Load;
- return CGF.EmitScalarConversion(Load, LHSTy, SrcTypes[0], Loc);
+ RValue RVal = CGF.EmitLoadOfLValue(LoadList[0], Loc);
+ assert(RVal.isScalar() && "All flattened source values should be scalars.");
+ return CGF.EmitScalarConversion(RVal.getScalarVal(), LoadList[0].getType(),
+ DestTy, Loc);
}
// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
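
The scalar-side helper consumes a flattened source the same way: the first N leaves feed the lanes of a vector result, or leaf zero alone feeds a builtin destination. Note the rewrite also fixes the old builtin path, which passed the source and destination types to EmitScalarConversion in swapped order. Below is a small runnable model, with doubles standing in for loaded leaves; aggToVector and aggToBuiltin are hypothetical names.

#include <cassert>
#include <cstdio>
#include <vector>

// Leaves stands in for the flattened-source LValue list; static_cast stands
// in for EmitScalarConversion.
static std::vector<float> aggToVector(const std::vector<double> &Leaves,
                                      size_t Lanes) {
  assert(Leaves.size() >= Lanes &&
         "flattened source needs at least as many leaves as the vector");
  std::vector<float> V(Lanes);
  for (size_t I = 0; I < Lanes; ++I)
    V[I] = static_cast<float>(Leaves[I]); // one conversion per lane
  return V;
}

static float aggToBuiltin(const std::vector<double> &Leaves) {
  return static_cast<float>(Leaves.at(0)); // only the first leaf is consumed
}

int main() {
  std::vector<double> Flat{1, 2, 3, 4};
  std::vector<float> V = aggToVector(Flat, 3); // trailing leaf is ignored
  std::printf("%g %g %g | %g\n", V[0], V[1], V[2], aggToBuiltin(Flat));
}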
@@ -2954,12 +2952,11 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_HLSLElementwiseCast: {
RValue RV = CGF.EmitAnyExpr(E);
SourceLocation Loc = CE->getExprLoc();
- QualType SrcTy = E->getType();
assert(RV.isAggregate() && "Not a valid HLSL Elementwise Cast.");
// RHS is an aggregate
- Address SrcVal = RV.getAggregateAddress();
- return EmitHLSLElementwiseCast(CGF, SrcVal, SrcTy, DestTy, Loc);
+ LValue SrcVal = CGF.MakeAddrLValue(RV.getAggregateAddress(), E->getType());
+ return EmitHLSLElementwiseCast(CGF, SrcVal, DestTy, Loc);
}
} // end of switch
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 8cda583..fa94692 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -6808,12 +6808,13 @@ public:
/// they were computed by collectAttachPtrExprInfo(), if they are semantically
/// different.
struct AttachPtrExprComparator {
- const MappableExprsHandler *Handler = nullptr;
+ const MappableExprsHandler &Handler;
// Cache of previous equality comparison results.
mutable llvm::DenseMap<std::pair<const Expr *, const Expr *>, bool>
CachedEqualityComparisons;
- AttachPtrExprComparator(const MappableExprsHandler *H) : Handler(H) {}
+ AttachPtrExprComparator(const MappableExprsHandler &H) : Handler(H) {}
+ AttachPtrExprComparator() = delete;
// Return true iff LHS is "less than" RHS.
bool operator()(const Expr *LHS, const Expr *RHS) const {
@@ -6821,15 +6822,15 @@ public:
return false;
// First, compare by complexity (depth)
- const auto ItLHS = Handler->AttachPtrComponentDepthMap.find(LHS);
- const auto ItRHS = Handler->AttachPtrComponentDepthMap.find(RHS);
+ const auto ItLHS = Handler.AttachPtrComponentDepthMap.find(LHS);
+ const auto ItRHS = Handler.AttachPtrComponentDepthMap.find(RHS);
std::optional<size_t> DepthLHS =
- (ItLHS != Handler->AttachPtrComponentDepthMap.end()) ? ItLHS->second
- : std::nullopt;
+ (ItLHS != Handler.AttachPtrComponentDepthMap.end()) ? ItLHS->second
+ : std::nullopt;
std::optional<size_t> DepthRHS =
- (ItRHS != Handler->AttachPtrComponentDepthMap.end()) ? ItRHS->second
- : std::nullopt;
+ (ItRHS != Handler.AttachPtrComponentDepthMap.end()) ? ItRHS->second
+ : std::nullopt;
// std::nullopt (no attach pointer) has lowest complexity
if (!DepthLHS.has_value() && !DepthRHS.has_value()) {
@@ -6877,8 +6878,8 @@ public:
/// Returns true iff LHS was computed before RHS by
/// collectAttachPtrExprInfo().
bool wasComputedBefore(const Expr *LHS, const Expr *RHS) const {
- const size_t &OrderLHS = Handler->AttachPtrComputationOrderMap.at(LHS);
- const size_t &OrderRHS = Handler->AttachPtrComputationOrderMap.at(RHS);
+ const size_t &OrderLHS = Handler.AttachPtrComputationOrderMap.at(LHS);
+ const size_t &OrderRHS = Handler.AttachPtrComputationOrderMap.at(RHS);
return OrderLHS < OrderRHS;
}
@@ -6897,7 +6898,7 @@ public:
if (!LHS || !RHS)
return false;
- ASTContext &Ctx = Handler->CGF.getContext();
+ ASTContext &Ctx = Handler.CGF.getContext();
// Strip away parentheses and no-op casts to get to the core expression
LHS = LHS->IgnoreParenNoopCasts(Ctx);
RHS = RHS->IgnoreParenNoopCasts(Ctx);
@@ -7246,6 +7247,10 @@ private:
llvm::DenseMap<const Expr *, size_t> AttachPtrComputationOrderMap = {
{nullptr, 0}};
+ /// An instance of attach-ptr-expr comparator that can be used throughout the
+ /// lifetime of this handler.
+ AttachPtrExprComparator AttachPtrComparator;
+
llvm::Value *getExprTypeSize(const Expr *E) const {
QualType ExprTy = E->getType().getCanonicalType();
@@ -8963,7 +8968,7 @@ private:
public:
MappableExprsHandler(const OMPExecutableDirective &Dir, CodeGenFunction &CGF)
- : CurDir(&Dir), CGF(CGF) {
+ : CurDir(&Dir), CGF(CGF), AttachPtrComparator(*this) {
// Extract firstprivate clause information.
for (const auto *C : Dir.getClausesOfKind<OMPFirstprivateClause>())
for (const auto *D : C->varlist())
@@ -9009,7 +9014,7 @@ public:
/// Constructor for the declare mapper directive.
MappableExprsHandler(const OMPDeclareMapperDecl &Dir, CodeGenFunction &CGF)
- : CurDir(&Dir), CGF(CGF) {}
+ : CurDir(&Dir), CGF(CGF), AttachPtrComparator(*this) {}
/// Generate code for the combined entry if we have a partially mapped struct
/// and take care of the mapping flags of the arguments corresponding to
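
Storing the handler by reference (with the default constructor deleted) means a comparator can never exist unbound, and letting the handler own one comparator for its whole lifetime keeps the cached equality results shared across uses. A minimal sketch of that ownership pattern follows; Owner and Comparator are invented stand-ins for MappableExprsHandler and AttachPtrExprComparator.

#include <algorithm>
#include <cstdio>
#include <vector>

struct Owner;

// The comparator keeps a reference to its owner so it can consult the owner's
// cached maps, and the deleted default constructor makes an unbound comparator
// unrepresentable, as in the patch.
struct Comparator {
  const Owner &O;
  explicit Comparator(const Owner &O) : O(O) {}
  Comparator() = delete;
  bool operator()(int LHS, int RHS) const;
};

struct Owner {
  std::vector<int> Rank; // stands in for the handler's depth/order maps
  Comparator Cmp{*this}; // safe: only the reference is captured here
};

bool Comparator::operator()(int LHS, int RHS) const {
  return O.Rank[LHS] < O.Rank[RHS];
}

int main() {
  Owner H;
  H.Rank = {2, 0, 1}; // key 0 has rank 2, key 1 rank 0, key 2 rank 1
  std::vector<int> Keys{0, 1, 2};
  std::sort(Keys.begin(), Keys.end(), H.Cmp); // ascending rank: 1 2 0
  for (int K : Keys)
    std::printf("%d ", K);
  std::printf("\n");
}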
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index f0565c1..99de6e1 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -4464,10 +4464,8 @@ public:
AggValueSlot slot = AggValueSlot::ignored());
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);
- void FlattenAccessAndType(
- Address Addr, QualType AddrTy,
- SmallVectorImpl<std::pair<Address, llvm::Value *>> &AccessList,
- SmallVectorImpl<QualType> &FlatTypes);
+ void FlattenAccessAndTypeLValue(LValue LVal,
+ SmallVectorImpl<LValue> &AccessList);
llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index f6f7f22..8d019d4 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -493,10 +493,15 @@ CodeGenModule::CodeGenModule(ASTContext &C,
auto ReaderOrErr = llvm::IndexedInstrProfReader::create(
CodeGenOpts.ProfileInstrumentUsePath, *FS,
CodeGenOpts.ProfileRemappingFile);
- // We're checking for profile read errors in CompilerInvocation, so if
- // there was an error it should've already been caught. If it hasn't been
- // somehow, trip an assertion.
- assert(ReaderOrErr);
+ if (auto E = ReaderOrErr.takeError()) {
+ unsigned DiagID = Diags.getCustomDiagID(
+        DiagnosticsEngine::Error, "error reading profile %0: %1");
+ llvm::handleAllErrors(std::move(E), [&](const llvm::ErrorInfoBase &EI) {
+ Diags.Report(DiagID)
+ << CodeGenOpts.ProfileInstrumentUsePath << EI.message();
+ });
+ return;
+ }
PGOReader = std::move(ReaderOrErr.get());
}
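
Rather than asserting that profile reading succeeded, the new code follows the standard llvm::Expected protocol: take the error, drain it through handleAllErrors (which marks it consumed), report a diagnostic, and return early. Here is a compact sketch of that pattern against the real llvm/Support/Error.h API; openProfile is a hypothetical stand-in for IndexedInstrProfReader::create, and the program needs LLVM's support library to build.

#include "llvm/Support/Error.h"
#include <cstdio>

// Hypothetical producer of an llvm::Expected result.
static llvm::Expected<int> openProfile(bool Ok) {
  if (!Ok)
    return llvm::createStringError(llvm::inconvertibleErrorCode(),
                                   "file not found");
  return 42;
}

int main() {
  llvm::Expected<int> ReaderOrErr = openProfile(false);
  if (llvm::Error E = ReaderOrErr.takeError()) {
    // Drain the error through a handler so it is marked as consumed, then
    // bail out, mirroring the new diagnostic-and-return in the patch.
    llvm::handleAllErrors(std::move(E), [](const llvm::ErrorInfoBase &EI) {
      std::fprintf(stderr, "error reading profile: %s\n",
                   EI.message().c_str());
    });
    return 1;
  }
  std::printf("%d\n", *ReaderOrErr); // safe: takeError() returned success
}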