Diffstat (limited to 'clang/lib/CodeGen')
-rw-r--r--  clang/lib/CodeGen/CGCall.cpp               |   3
-rw-r--r--  clang/lib/CodeGen/CGDebugInfo.cpp          |  11
-rw-r--r--  clang/lib/CodeGen/CGExpr.cpp               | 129
-rw-r--r--  clang/lib/CodeGen/CGExprCXX.cpp            |  12
-rw-r--r--  clang/lib/CodeGen/CGExprScalar.cpp         |   5
-rw-r--r--  clang/lib/CodeGen/CGHLSLBuiltins.cpp       |  13
-rw-r--r--  clang/lib/CodeGen/CGHLSLRuntime.cpp        |  16
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp   |   7
-rw-r--r--  clang/lib/CodeGen/CodeGenFunction.h        |   7
9 files changed, 187 insertions, 16 deletions
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index a931ce4..c5371e4 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -3018,8 +3018,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
 
   ArgNo = 0;
   if (AddedPotentialArgAccess && MemAttrForPtrArgs) {
-    llvm::FunctionType *FunctionType = FunctionType =
-        getTypes().GetFunctionType(FI);
+    llvm::FunctionType *FunctionType = getTypes().GetFunctionType(FI);
     for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
                                             E = FI.arg_end();
          I != E; ++I, ++ArgNo) {
diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp
index b91cb36..9fe9a13 100644
--- a/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -900,10 +900,13 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
     assert((BT->getKind() != BuiltinType::SveCount || Info.NumVectors == 1) &&
            "Unsupported number of vectors for svcount_t");
 
-    // Debuggers can't extract 1bit from a vector, so will display a
-    // bitpattern for predicates instead.
     unsigned NumElems = Info.EC.getKnownMinValue() * Info.NumVectors;
-    if (Info.ElementType == CGM.getContext().BoolTy) {
+    llvm::Metadata *BitStride = nullptr;
+    if (BT->getKind() == BuiltinType::SveBool) {
+      Info.ElementType = CGM.getContext().UnsignedCharTy;
+      BitStride = llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
+          llvm::Type::getInt64Ty(CGM.getLLVMContext()), 1));
+    } else if (BT->getKind() == BuiltinType::SveCount) {
       NumElems /= 8;
       Info.ElementType = CGM.getContext().UnsignedCharTy;
     }
@@ -929,7 +932,7 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
         getOrCreateType(Info.ElementType, TheCU->getFile());
     auto Align = getTypeAlignIfRequired(BT, CGM.getContext());
     return DBuilder.createVectorType(/*Size*/ 0, Align, ElemTy,
-                                     SubscriptArray);
+                                     SubscriptArray, BitStride);
   }
   // It doesn't make sense to generate debug info for PowerPC MMA vector types.
   // So we return a safe type here to avoid generating an error.
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 7dd6a83..e8255b0 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -30,6 +30,7 @@
 #include "clang/AST/Attr.h"
 #include "clang/AST/DeclObjC.h"
 #include "clang/AST/NSAPI.h"
+#include "clang/AST/ParentMapContext.h"
 #include "clang/AST/StmtVisitor.h"
 #include "clang/Basic/Builtins.h"
 #include "clang/Basic/CodeGenOptions.h"
@@ -1353,6 +1354,115 @@ void CodeGenFunction::EmitAllocToken(llvm::CallBase *CB, QualType AllocType) {
   CB->setMetadata(llvm::LLVMContext::MD_alloc_token, MDN);
 }
 
+namespace {
+/// Infer type from a simple sizeof expression.
+QualType inferTypeFromSizeofExpr(const Expr *E) {
+  const Expr *Arg = E->IgnoreParenImpCasts();
+  if (const auto *UET = dyn_cast<UnaryExprOrTypeTraitExpr>(Arg)) {
+    if (UET->getKind() == UETT_SizeOf) {
+      if (UET->isArgumentType())
+        return UET->getArgumentTypeInfo()->getType();
+      else
+        return UET->getArgumentExpr()->getType();
+    }
+  }
+  return QualType();
+}
+
+/// Infer type from an arithmetic expression involving a sizeof. For example:
+///
+///   malloc(sizeof(MyType) + padding);  // infers 'MyType'
+///   malloc(sizeof(MyType) * 32);       // infers 'MyType'
+///   malloc(32 * sizeof(MyType));       // infers 'MyType'
+///   malloc(sizeof(MyType) << 1);       // infers 'MyType'
+///   ...
+///
+/// More complex arithmetic expressions are supported, but are a heuristic, e.g.
+/// when considering allocations for structs with flexible array members:
+///
+///   malloc(sizeof(HasFlexArray) + sizeof(int) * 32);  // infers 'HasFlexArray'
+///
+QualType inferPossibleTypeFromArithSizeofExpr(const Expr *E) {
+  const Expr *Arg = E->IgnoreParenImpCasts();
+  // The argument is a lone sizeof expression.
+  if (QualType T = inferTypeFromSizeofExpr(Arg); !T.isNull())
+    return T;
+  if (const auto *BO = dyn_cast<BinaryOperator>(Arg)) {
+    // Argument is an arithmetic expression. Cover common arithmetic patterns
+    // involving sizeof.
+    switch (BO->getOpcode()) {
+    case BO_Add:
+    case BO_Div:
+    case BO_Mul:
+    case BO_Shl:
+    case BO_Shr:
+    case BO_Sub:
+      if (QualType T = inferPossibleTypeFromArithSizeofExpr(BO->getLHS());
+          !T.isNull())
+        return T;
+      if (QualType T = inferPossibleTypeFromArithSizeofExpr(BO->getRHS());
+          !T.isNull())
+        return T;
+      break;
+    default:
+      break;
+    }
+  }
+  return QualType();
+}
+
+/// If the expression E is a reference to a variable, infer the type from a
+/// variable's initializer if it contains a sizeof. Beware, this is a heuristic
+/// and ignores if a variable is later reassigned. For example:
+///
+///   size_t my_size = sizeof(MyType);
+///   void *x = malloc(my_size);  // infers 'MyType'
+///
+QualType inferPossibleTypeFromVarInitSizeofExpr(const Expr *E) {
+  const Expr *Arg = E->IgnoreParenImpCasts();
+  if (const auto *DRE = dyn_cast<DeclRefExpr>(Arg)) {
+    if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+      if (const Expr *Init = VD->getInit())
+        return inferPossibleTypeFromArithSizeofExpr(Init);
+    }
+  }
+  return QualType();
+}
+
+/// Deduces the allocated type by checking if the allocation call's result
+/// is immediately used in a cast expression. For example:
+///
+///   MyType *x = (MyType *)malloc(4096);  // infers 'MyType'
+///
+QualType inferPossibleTypeFromCastExpr(const CallExpr *CallE,
+                                       const CastExpr *CastE) {
+  if (!CastE)
+    return QualType();
+  QualType PtrType = CastE->getType();
+  if (PtrType->isPointerType())
+    return PtrType->getPointeeType();
+  return QualType();
+}
+} // end anonymous namespace
+
+void CodeGenFunction::EmitAllocToken(llvm::CallBase *CB, const CallExpr *E) {
+  QualType AllocType;
+  // First check arguments.
+  for (const Expr *Arg : E->arguments()) {
+    AllocType = inferPossibleTypeFromArithSizeofExpr(Arg);
+    if (AllocType.isNull())
+      AllocType = inferPossibleTypeFromVarInitSizeofExpr(Arg);
+    if (!AllocType.isNull())
+      break;
+  }
+  // Then check later casts.
+  if (AllocType.isNull())
+    AllocType = inferPossibleTypeFromCastExpr(E, CurCast);
+  // Emit if we were able to infer the type.
+  if (!AllocType.isNull())
+    EmitAllocToken(CB, AllocType);
+}
+
 CodeGenFunction::ComplexPairTy CodeGenFunction::
 EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                          bool isInc, bool isPre) {
@@ -5723,6 +5833,9 @@ LValue CodeGenFunction::EmitConditionalOperatorLValue(
 /// are permitted with aggregate result, including noop aggregate casts, and
 /// cast from scalar to union.
 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
+  auto RestoreCurCast =
+      llvm::make_scope_exit([this, Prev = CurCast] { CurCast = Prev; });
+  CurCast = E;
   switch (E->getCastKind()) {
   case CK_ToVoid:
   case CK_BitCast:
@@ -6668,16 +6781,24 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType,
   RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &LocalCallOrInvoke,
                          E == MustTailCall, E->getExprLoc());
 
-  // Generate function declaration DISuprogram in order to be used
-  // in debug info about call sites.
-  if (CGDebugInfo *DI = getDebugInfo()) {
-    if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
+  if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
+    // Generate function declaration DISuprogram in order to be used
+    // in debug info about call sites.
+    if (CGDebugInfo *DI = getDebugInfo()) {
       FunctionArgList Args;
       QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
       DI->EmitFuncDeclForCallSite(LocalCallOrInvoke,
                                   DI->getFunctionType(CalleeDecl, ResTy, Args),
                                   CalleeDecl);
     }
+    if (CalleeDecl->hasAttr<RestrictAttr>() ||
+        CalleeDecl->hasAttr<AllocSizeAttr>()) {
+      // Function has 'malloc' (aka. 'restrict') or 'alloc_size' attribute.
+      if (SanOpts.has(SanitizerKind::AllocToken)) {
+        // Set !alloc_token metadata.
+        EmitAllocToken(LocalCallOrInvoke, E);
+      }
+    }
   }
 
   if (CallOrInvoke)
     *CallOrInvoke = LocalCallOrInvoke;
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index 290c2e0..31ac266 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -1371,8 +1371,16 @@ RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
 
   for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
     if (auto *FD = dyn_cast<FunctionDecl>(Decl))
-      if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
-        return EmitNewDeleteCall(*this, FD, Type, Args);
+      if (Ctx.hasSameType(FD->getType(), QualType(Type, 0))) {
+        RValue RV = EmitNewDeleteCall(*this, FD, Type, Args);
+        if (auto *CB = dyn_cast_if_present<llvm::CallBase>(RV.getScalarVal())) {
+          if (SanOpts.has(SanitizerKind::AllocToken)) {
+            // Set !alloc_token metadata.
+            EmitAllocToken(CB, TheCall);
+          }
+        }
+        return RV;
+      }
 
   llvm_unreachable("predeclared global operator new/delete is missing");
 }
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index 06d9d81..715160d 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -33,6 +33,7 @@
 #include "clang/Basic/DiagnosticTrap.h"
 #include "clang/Basic/TargetInfo.h"
 #include "llvm/ADT/APFixedPoint.h"
+#include "llvm/ADT/ScopeExit.h"
 #include "llvm/IR/Argument.h"
 #include "llvm/IR/CFG.h"
 #include "llvm/IR/Constants.h"
@@ -2434,6 +2435,10 @@ static Value *EmitHLSLElementwiseCast(CodeGenFunction &CGF, LValue SrcVal,
 // have to handle a more broad range of conversions than explicit casts, as they
 // handle things like function to ptr-to-function decay etc.
 Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
+  auto RestoreCurCast =
+      llvm::make_scope_exit([this, Prev = CGF.CurCast] { CGF.CurCast = Prev; });
+  CGF.CurCast = CE;
+
   Expr *E = CE->getSubExpr();
   QualType DestTy = CE->getType();
   CastKind Kind = CE->getCastKind();
diff --git a/clang/lib/CodeGen/CGHLSLBuiltins.cpp b/clang/lib/CodeGen/CGHLSLBuiltins.cpp
index 6c0fc8d..4f2f5a76 100644
--- a/clang/lib/CodeGen/CGHLSLBuiltins.cpp
+++ b/clang/lib/CodeGen/CGHLSLBuiltins.cpp
@@ -352,6 +352,19 @@ Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
     SmallVector<Value *> Args{OrderID, SpaceOp, RangeOp, IndexOp, Name};
     return Builder.CreateIntrinsic(HandleTy, IntrinsicID, Args);
   }
+  case Builtin::BI__builtin_hlsl_resource_counterhandlefromimplicitbinding: {
+    Value *MainHandle = EmitScalarExpr(E->getArg(0));
+    if (!CGM.getTriple().isSPIRV())
+      return MainHandle;
+
+    llvm::Type *HandleTy = CGM.getTypes().ConvertType(E->getType());
+    Value *OrderID = EmitScalarExpr(E->getArg(1));
+    Value *SpaceOp = EmitScalarExpr(E->getArg(2));
+    llvm::Intrinsic::ID IntrinsicID =
+        llvm::Intrinsic::spv_resource_counterhandlefromimplicitbinding;
+    SmallVector<Value *> Args{MainHandle, OrderID, SpaceOp};
+    return Builder.CreateIntrinsic(HandleTy, IntrinsicID, Args);
+  }
   case Builtin::BI__builtin_hlsl_resource_nonuniformindex: {
     Value *IndexOp = EmitScalarExpr(E->getArg(0));
     llvm::Type *RetTy = ConvertType(E->getType());
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.cpp b/clang/lib/CodeGen/CGHLSLRuntime.cpp
index ede1780..603cef9 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.cpp
+++ b/clang/lib/CodeGen/CGHLSLRuntime.cpp
@@ -145,19 +145,29 @@ static CXXMethodDecl *lookupResourceInitMethodAndSetupArgs(
     // explicit binding
     auto *RegSlot = llvm::ConstantInt::get(CGM.IntTy, Binding.getSlot());
     Args.add(RValue::get(RegSlot), AST.UnsignedIntTy);
-    CreateMethod = lookupMethod(ResourceDecl, "__createFromBinding", SC_Static);
+    const char *Name = Binding.hasCounterImplicitOrderID()
+                           ? "__createFromBindingWithImplicitCounter"
+                           : "__createFromBinding";
+    CreateMethod = lookupMethod(ResourceDecl, Name, SC_Static);
   } else {
     // implicit binding
     auto *OrderID =
        llvm::ConstantInt::get(CGM.IntTy, Binding.getImplicitOrderID());
     Args.add(RValue::get(OrderID), AST.UnsignedIntTy);
-    CreateMethod =
-        lookupMethod(ResourceDecl, "__createFromImplicitBinding", SC_Static);
+    const char *Name = Binding.hasCounterImplicitOrderID()
+                           ? "__createFromImplicitBindingWithImplicitCounter"
+                           : "__createFromImplicitBinding";
+    CreateMethod = lookupMethod(ResourceDecl, Name, SC_Static);
   }
   Args.add(RValue::get(Space), AST.UnsignedIntTy);
   Args.add(RValue::get(Range), AST.IntTy);
   Args.add(RValue::get(Index), AST.UnsignedIntTy);
   Args.add(RValue::get(NameStr), AST.getPointerType(AST.CharTy.withConst()));
+  if (Binding.hasCounterImplicitOrderID()) {
+    uint32_t CounterBinding = Binding.getCounterImplicitOrderID();
+    auto *CounterOrderID = llvm::ConstantInt::get(CGM.IntTy, CounterBinding);
+    Args.add(RValue::get(CounterOrderID), AST.UnsignedIntTy);
+  }
 
   return CreateMethod;
 }
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
index 4272d8b..3613b6a 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -869,6 +869,8 @@ CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU(CodeGenModule &CGM)
       CGM.getLangOpts().OpenMPOffloadMandatory,
       /*HasRequiresReverseOffload*/ false, /*HasRequiresUnifiedAddress*/ false,
       hasRequiresUnifiedSharedMemory(), /*HasRequiresDynamicAllocators*/ false);
+  Config.setDefaultTargetAS(
+      CGM.getContext().getTargetInfo().getTargetAddressSpace(LangAS::Default));
   OMPBuilder.setConfig(Config);
 
   if (!CGM.getLangOpts().OpenMPIsTargetDevice)
@@ -1243,7 +1245,10 @@ void CGOpenMPRuntimeGPU::emitParallelCall(
     llvm::Value *ID = llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
     if (WFn)
       ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
-    llvm::Value *FnPtr = Bld.CreateBitOrPointerCast(OutlinedFn, CGM.Int8PtrTy);
+    llvm::Type *FnPtrTy = llvm::PointerType::get(
+        CGF.getLLVMContext(), CGM.getDataLayout().getProgramAddressSpace());
+
+    llvm::Value *FnPtr = Bld.CreateBitOrPointerCast(OutlinedFn, FnPtrTy);
 
     // Create a private scope that will globalize the arguments
     // passed from the outside of the target region.
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index e14e60c..1f0be2d 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -346,6 +346,10 @@ public:
   QualType FnRetTy;
   llvm::Function *CurFn = nullptr;
 
+  /// If a cast expression is being visited, this holds the current cast's
+  /// expression.
+  const CastExpr *CurCast = nullptr;
+
   /// Save Parameter Decl for coroutine.
   llvm::SmallVector<const ParmVarDecl *, 4> FnArgs;
 
@@ -3350,6 +3354,9 @@ public:
   /// Emit additional metadata used by the AllocToken instrumentation.
   void EmitAllocToken(llvm::CallBase *CB, QualType AllocType);
+  /// Emit additional metadata used by the AllocToken instrumentation,
+  /// inferring the type from an allocation call expression.
+  void EmitAllocToken(llvm::CallBase *CB, const CallExpr *E);
 
   llvm::Value *GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FD,
                                         const FieldDecl *CountDecl);