Diffstat (limited to 'clang/lib/CodeGen'):
 clang/lib/CodeGen/BackendUtil.cpp    | 15
 clang/lib/CodeGen/CodeGenModule.cpp  | 19
 clang/lib/CodeGen/Targets/AMDGPU.cpp |  8
 clang/lib/CodeGen/Targets/NVPTX.cpp  |  4
 clang/lib/CodeGen/Targets/SPIR.cpp   | 36
 5 files changed, 55 insertions(+), 27 deletions(-)
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index 6020684..c423c4b 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -234,9 +234,12 @@ public:
};
} // namespace
-static AllocTokenOptions getAllocTokenOptions(const CodeGenOptions &CGOpts) {
+static AllocTokenOptions getAllocTokenOptions(const LangOptions &LangOpts,
+ const CodeGenOptions &CGOpts) {
AllocTokenOptions Opts;
- Opts.MaxTokens = CGOpts.AllocTokenMax;
+ if (LangOpts.AllocTokenMode)
+ Opts.Mode = *LangOpts.AllocTokenMode;
+ Opts.MaxTokens = LangOpts.AllocTokenMax;
Opts.Extended = CGOpts.SanitizeAllocTokenExtended;
Opts.FastABI = CGOpts.SanitizeAllocTokenFastABI;
return Opts;
@@ -430,12 +433,6 @@ static bool initTargetOptions(const CompilerInstance &CI,
Options.NoInfsFPMath = LangOpts.NoHonorInfs;
Options.NoNaNsFPMath = LangOpts.NoHonorNaNs;
Options.NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
- Options.UnsafeFPMath = LangOpts.AllowFPReassoc && LangOpts.AllowRecip &&
- LangOpts.NoSignedZero && LangOpts.ApproxFunc &&
- (LangOpts.getDefaultFPContractMode() ==
- LangOptions::FPModeKind::FPM_Fast ||
- LangOpts.getDefaultFPContractMode() ==
- LangOptions::FPModeKind::FPM_FastHonorPragmas);
Options.BBAddrMap = CodeGenOpts.BBAddrMap;
Options.BBSections =
@@ -808,7 +805,7 @@ static void addSanitizers(const Triple &TargetTriple,
// memory allocation function detection.
MPM.addPass(InferFunctionAttrsPass());
}
- MPM.addPass(AllocTokenPass(getAllocTokenOptions(CodeGenOpts)));
+ MPM.addPass(AllocTokenPass(getAllocTokenOptions(LangOpts, CodeGenOpts)));
}
};
if (ClSanitizeOnOptimizerEarlyEP) {
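
For readability, the AllocToken helper reads as follows once this hunk is applied (reassembled from the added and context lines above; surrounding includes omitted). The token mode and the maximum token count now come from LangOptions, while the remaining knobs stay on CodeGenOptions:

static AllocTokenOptions getAllocTokenOptions(const LangOptions &LangOpts,
                                              const CodeGenOptions &CGOpts) {
  AllocTokenOptions Opts;
  // Language options now drive the token mode and the maximum token count.
  if (LangOpts.AllocTokenMode)
    Opts.Mode = *LangOpts.AllocTokenMode;
  Opts.MaxTokens = LangOpts.AllocTokenMax;
  // These knobs are still taken from the CodeGen options.
  Opts.Extended = CGOpts.SanitizeAllocTokenExtended;
  Opts.FastABI = CGOpts.SanitizeAllocTokenFastABI;
  return Opts;
}
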
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index e490b1c..3746bc04 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -1325,22 +1325,29 @@ void CodeGenModule::Release() {
"tag-stack-memory-buildattr", 1);
if (T.isARM() || T.isThumb() || T.isAArch64()) {
+ // Previously the value 1 was used, meaning the backend should derive the
+ // function attributes from this module flag. The value 2 now means the
+ // function attributes are already set for all functions in this module, so
+ // there is no need to propagate them from the module flag. The value only
+ // matters when LTO merges modules, because the backend will already see all
+ // required function attributes set; it is consumed before the modules get
+ // merged. Any positive value means the feature is active and the required
+ // binary markings need to be emitted accordingly.
if (LangOpts.BranchTargetEnforcement)
getModule().addModuleFlag(llvm::Module::Min, "branch-target-enforcement",
- 1);
+ 2);
if (LangOpts.BranchProtectionPAuthLR)
getModule().addModuleFlag(llvm::Module::Min, "branch-protection-pauth-lr",
- 1);
+ 2);
if (LangOpts.GuardedControlStack)
- getModule().addModuleFlag(llvm::Module::Min, "guarded-control-stack", 1);
+ getModule().addModuleFlag(llvm::Module::Min, "guarded-control-stack", 2);
if (LangOpts.hasSignReturnAddress())
- getModule().addModuleFlag(llvm::Module::Min, "sign-return-address", 1);
+ getModule().addModuleFlag(llvm::Module::Min, "sign-return-address", 2);
if (LangOpts.isSignReturnAddressScopeAll())
getModule().addModuleFlag(llvm::Module::Min, "sign-return-address-all",
- 1);
+ 2);
if (!LangOpts.isSignReturnAddressWithAKey())
getModule().addModuleFlag(llvm::Module::Min,
- "sign-return-address-with-bkey", 1);
+ "sign-return-address-with-bkey", 2);
if (LangOpts.PointerAuthELFGOT)
getModule().addModuleFlag(llvm::Module::Min, "ptrauth-elf-got", 1);
diff --git a/clang/lib/CodeGen/Targets/AMDGPU.cpp b/clang/lib/CodeGen/Targets/AMDGPU.cpp
index 0bc4b4b7..e4ad078 100644
--- a/clang/lib/CodeGen/Targets/AMDGPU.cpp
+++ b/clang/lib/CodeGen/Targets/AMDGPU.cpp
@@ -439,9 +439,11 @@ void AMDGPUTargetCodeGenInfo::setTargetAttributes(
return;
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (FD)
+ if (FD) {
setFunctionDeclAttributes(FD, F, M);
-
+ if (FD->hasAttr<DeviceKernelAttr>() && !M.getLangOpts().OpenCL)
+ F->setCallingConv(getDeviceKernelCallingConv());
+ }
if (!getABIInfo().getCodeGenOpts().EmitIEEENaNCompliantInsts)
F->addFnAttr("amdgpu-ieee", "false");
}
@@ -658,7 +660,7 @@ llvm::Value *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
// kernel address (only the kernel descriptor).
auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
&Mod);
- F->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
+ F->setCallingConv(getDeviceKernelCallingConv());
llvm::AttrBuilder KernelAttrs(C);
// FIXME: The invoke isn't applying the right attributes either
diff --git a/clang/lib/CodeGen/Targets/NVPTX.cpp b/clang/lib/CodeGen/Targets/NVPTX.cpp
index 53f2fc4..f6715861 100644
--- a/clang/lib/CodeGen/Targets/NVPTX.cpp
+++ b/clang/lib/CodeGen/Targets/NVPTX.cpp
@@ -264,7 +264,7 @@ void NVPTXTargetCodeGenInfo::setTargetAttributes(
// And kernel functions are not subject to inlining
F->addFnAttr(llvm::Attribute::NoInline);
if (FD->hasAttr<CUDAGlobalAttr>()) {
- F->setCallingConv(llvm::CallingConv::PTX_Kernel);
+ F->setCallingConv(getDeviceKernelCallingConv());
for (auto IV : llvm::enumerate(FD->parameters()))
if (IV.value()->hasAttr<CUDAGridConstantAttr>())
@@ -278,7 +278,7 @@ void NVPTXTargetCodeGenInfo::setTargetAttributes(
}
// Attach kernel metadata directly if compiling for NVPTX.
if (FD->hasAttr<DeviceKernelAttr>())
- F->setCallingConv(llvm::CallingConv::PTX_Kernel);
+ F->setCallingConv(getDeviceKernelCallingConv());
}
void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::GlobalValue *GV,
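
Both the AMDGPU and NVPTX hunks replace hard-coded calling conventions with getDeviceKernelCallingConv(). The patch only shows the call sites; the sketch below is an assumption about what the hook resolves to per target (free functions here, not the real TargetCodeGenInfo virtuals), matching the constants the old code hard-coded.

#include "llvm/IR/CallingConv.h"

// Assumed per-target mapping; not taken from the patch.
static llvm::CallingConv::ID deviceKernelCCForAMDGPU() {
  return llvm::CallingConv::AMDGPU_KERNEL; // was hard-coded in createEnqueuedBlockKernel
}
static llvm::CallingConv::ID deviceKernelCCForNVPTX() {
  return llvm::CallingConv::PTX_Kernel; // was hard-coded in setTargetAttributes
}
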
diff --git a/clang/lib/CodeGen/Targets/SPIR.cpp b/clang/lib/CodeGen/Targets/SPIR.cpp
index 80e096e..15d0b35 100644
--- a/clang/lib/CodeGen/Targets/SPIR.cpp
+++ b/clang/lib/CodeGen/Targets/SPIR.cpp
@@ -64,6 +64,8 @@ public:
llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
llvm::PointerType *T,
QualType QT) const override;
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const override;
};
class SPIRVTargetCodeGenInfo : public CommonSPIRTargetCodeGenInfo {
public:
@@ -268,6 +270,22 @@ CommonSPIRTargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
llvm::ConstantPointerNull::get(NPT), PT);
}
+void CommonSPIRTargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
+ if (M.getLangOpts().OpenCL || GV->isDeclaration())
+ return;
+
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD)
+ return;
+
+ llvm::Function *F = dyn_cast<llvm::Function>(GV);
+ assert(F && "Expected GlobalValue to be a Function");
+
+ if (FD->hasAttr<DeviceKernelAttr>())
+ F->setCallingConv(getDeviceKernelCallingConv());
+}
+
LangAS
SPIRVTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
const VarDecl *D) const {
@@ -292,19 +310,23 @@ SPIRVTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
void SPIRVTargetCodeGenInfo::setTargetAttributes(
const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
- if (!M.getLangOpts().HIP ||
- M.getTarget().getTriple().getVendor() != llvm::Triple::AMD)
- return;
if (GV->isDeclaration())
return;
- auto F = dyn_cast<llvm::Function>(GV);
- if (!F)
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD)
return;
- auto FD = dyn_cast_or_null<FunctionDecl>(D);
- if (!FD)
+ llvm::Function *F = dyn_cast<llvm::Function>(GV);
+ assert(F && "Expected GlobalValue to be a Function");
+
+ if (FD->hasAttr<DeviceKernelAttr>())
+ F->setCallingConv(getDeviceKernelCallingConv());
+
+ if (!M.getLangOpts().HIP ||
+ M.getTarget().getTriple().getVendor() != llvm::Triple::AMD)
return;
+
if (!FD->hasAttr<CUDAGlobalAttr>())
return;
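
One way to see the net effect of the SPIR changes: a function carrying DeviceKernelAttr on a non-OpenCL SPIR/SPIR-V target should now end up with the device-kernel calling convention on the emitted llvm::Function. A minimal post-CodeGen check might look like the sketch below (hypothetical helper; SPIR_KERNEL is an assumption about what getDeviceKernelCallingConv() returns for these targets).

#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"

// Hypothetical check, not part of the patch.
static bool looksLikeSPIRDeviceKernel(const llvm::Function &F) {
  // Assumes SPIR/SPIR-V device kernels use the SPIR_KERNEL calling convention.
  return F.getCallingConv() == llvm::CallingConv::SPIR_KERNEL;
}
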