Diffstat (limited to 'clang/lib/CodeGen/CGCall.cpp')
-rw-r--r-- clang/lib/CodeGen/CGCall.cpp | 28 ++++++++++++++--------------
1 file changed, 14 insertions, 14 deletions
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index 82a24f7..2f1c769 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -672,10 +672,10 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, CodeGenModule &CGM,
     addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                 args.size());
 
-  // If we don't have a prototype at all, but we're supposed to
-  // explicitly use the variadic convention for unprototyped calls,
-  // treat all of the arguments as required but preserve the nominal
-  // possibility of variadics.
+    // If we don't have a prototype at all, but we're supposed to
+    // explicitly use the variadic convention for unprototyped calls,
+    // treat all of the arguments as required but preserve the nominal
+    // possibility of variadics.
   } else if (CGM.getTargetCodeGenInfo().isNoProtoCallVariadic(
                  args, cast<FunctionNoProtoType>(fnType))) {
     required = RequiredArgs(args.size());
@@ -4061,7 +4061,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
       if (results.size() == 1) {
         RV = results[0];
 
-      // Otherwise, we need to make a first-class aggregate.
+        // Otherwise, we need to make a first-class aggregate.
       } else {
         // Construct a return type that lacks padding elements.
         llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
@@ -4200,11 +4200,11 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
   if (type->isReferenceType()) {
     args.add(RValue::get(Builder.CreateLoad(local)), type);
 
-  // In ARC, move out of consumed arguments so that the release cleanup
-  // entered by StartFunction doesn't cause an over-release. This isn't
-  // optimal -O0 code generation, but it should get cleaned up when
-  // optimization is enabled. This also assumes that delegate calls are
-  // performed exactly once for a set of arguments, but that should be safe.
+    // In ARC, move out of consumed arguments so that the release cleanup
+    // entered by StartFunction doesn't cause an over-release. This isn't
+    // optimal -O0 code generation, but it should get cleaned up when
+    // optimization is enabled. This also assumes that delegate calls are
+    // performed exactly once for a set of arguments, but that should be safe.
   } else if (getLangOpts().ObjCAutoRefCount && param->hasAttr<NSConsumedAttr>() &&
              type->isObjCRetainableType()) {
     llvm::Value *ptr = Builder.CreateLoad(local);
@@ -4213,8 +4213,8 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
     Builder.CreateStore(null, local);
     args.add(RValue::get(ptr), type);
 
-  // For the most part, we just need to load the alloca, except that
-  // aggregate r-values are actually pointers to temporaries.
+    // For the most part, we just need to load the alloca, except that
+    // aggregate r-values are actually pointers to temporaries.
   } else {
     args.add(convertTempToRValue(local, type, loc), type);
   }
@@ -4306,7 +4306,7 @@ static void emitWriteback(CodeGenFunction &CGF,
     // Release the old value.
     CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
 
-  // Otherwise, we can just do a normal lvalue store.
+    // Otherwise, we can just do a normal lvalue store.
   } else {
     CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
   }
@@ -4347,7 +4347,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
   if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
     srcLV = CGF.EmitLValue(lvExpr);
 
-  // Otherwise, just emit it as a scalar.
+    // Otherwise, just emit it as a scalar.
   } else {
     Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
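
For context on the second hunk's comment about making a "first-class aggregate": when a coerce-and-expand return produces more than one scalar result, CodeGen packs the loaded elements into a single struct value with successive insertvalue instructions. Below is a minimal sketch of that pattern using LLVM's IRBuilder API; the helper name packResults and its parameter list are illustrative, not the actual CGCall.cpp code.

#include "llvm/IR/IRBuilder.h"

// Sketch: pack already-loaded scalar results into one first-class aggregate.
// 'results' holds the coerced elements; 'returnType' is the unpadded struct
// type (cf. RetAI.getUnpaddedCoerceAndExpandType() in the hunk above).
static llvm::Value *packResults(llvm::IRBuilder<> &Builder,
                                llvm::StructType *returnType,
                                llvm::ArrayRef<llvm::Value *> results) {
  // Start from a poison aggregate and insert each element at its index.
  llvm::Value *RV = llvm::PoisonValue::get(returnType);
  for (unsigned i = 0, e = results.size(); i != e; ++i)
    RV = Builder.CreateInsertValue(RV, results[i], i);
  return RV;
}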