Diffstat (limited to 'llvm/lib/IR/AutoUpgrade.cpp')
-rw-r--r--  llvm/lib/IR/AutoUpgrade.cpp  129
1 file changed, 103 insertions(+), 26 deletions(-)
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index f28b989..10f915d 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -1426,6 +1426,28 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
Intrinsic::memset, ParamTypes);
return true;
}
+
+ unsigned MaskedID =
+ StringSwitch<unsigned>(Name)
+ .StartsWith("masked.load", Intrinsic::masked_load)
+ .StartsWith("masked.gather", Intrinsic::masked_gather)
+ .StartsWith("masked.store", Intrinsic::masked_store)
+ .StartsWith("masked.scatter", Intrinsic::masked_scatter)
+ .Default(0);
+ if (MaskedID && F->arg_size() == 4) {
+ rename(F);
+ if (MaskedID == Intrinsic::masked_load ||
+ MaskedID == Intrinsic::masked_gather) {
+ NewFn = Intrinsic::getOrInsertDeclaration(
+ F->getParent(), MaskedID,
+ {F->getReturnType(), F->getArg(0)->getType()});
+ return true;
+ }
+ NewFn = Intrinsic::getOrInsertDeclaration(
+ F->getParent(), MaskedID,
+ {F->getArg(0)->getType(), F->getArg(1)->getType()});
+ return true;
+ }
break;
}
case 'n': {
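
The hunk above only rewrites the old four-operand declarations (pointer, alignment, mask, passthru): loads and gathers are re-declared with the overload pair {result type, pointer type}, stores and scatters with {value type, pointer type}. A minimal stand-alone sketch of requesting the same new-style declarations; the helper name, the <4 x i32> element type, and the default address space are assumptions made for illustration:

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    // Illustrative only: fetch new-form masked.load / masked.store
    // declarations for <4 x i32> through a default-address-space pointer.
    static void declareNewMaskedIntrinsics(Module &M) {
      LLVMContext &Ctx = M.getContext();
      Type *VecTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
      Type *PtrTy = PointerType::get(Ctx, /*AddressSpace=*/0);
      // Loads and gathers overload on {result type, pointer type}.
      Intrinsic::getOrInsertDeclaration(&M, Intrinsic::masked_load,
                                        {VecTy, PtrTy});
      // Stores and scatters overload on {value type, pointer type}.
      Intrinsic::getOrInsertDeclaration(&M, Intrinsic::masked_store,
                                        {VecTy, PtrTy});
    }
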
@@ -5231,6 +5253,54 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
break;
}
+ case Intrinsic::masked_load:
+ case Intrinsic::masked_gather:
+ case Intrinsic::masked_store:
+ case Intrinsic::masked_scatter: {
+ if (CI->arg_size() != 4) {
+ DefaultCase();
+ return;
+ }
+
+ const DataLayout &DL = CI->getDataLayout();
+ switch (NewFn->getIntrinsicID()) {
+ case Intrinsic::masked_load:
+ NewCall = Builder.CreateMaskedLoad(
+ CI->getType(), CI->getArgOperand(0),
+ cast<ConstantInt>(CI->getArgOperand(1))->getAlignValue(),
+ CI->getArgOperand(2), CI->getArgOperand(3));
+ break;
+ case Intrinsic::masked_gather:
+ NewCall = Builder.CreateMaskedGather(
+ CI->getType(), CI->getArgOperand(0),
+ DL.getValueOrABITypeAlignment(
+ cast<ConstantInt>(CI->getArgOperand(1))->getMaybeAlignValue(),
+ CI->getType()->getScalarType()),
+ CI->getArgOperand(2), CI->getArgOperand(3));
+ break;
+ case Intrinsic::masked_store:
+ NewCall = Builder.CreateMaskedStore(
+ CI->getArgOperand(0), CI->getArgOperand(1),
+ cast<ConstantInt>(CI->getArgOperand(2))->getAlignValue(),
+ CI->getArgOperand(3));
+ break;
+ case Intrinsic::masked_scatter:
+ NewCall = Builder.CreateMaskedScatter(
+ CI->getArgOperand(0), CI->getArgOperand(1),
+ DL.getValueOrABITypeAlignment(
+ cast<ConstantInt>(CI->getArgOperand(2))->getMaybeAlignValue(),
+ CI->getArgOperand(0)->getType()->getScalarType()),
+ CI->getArgOperand(3));
+ break;
+ default:
+ llvm_unreachable("Unexpected intrinsic ID");
+ }
+ // Previous metadata is still valid.
+ NewCall->copyMetadata(*CI);
+ NewCall->setTailCallKind(cast<CallInst>(CI)->getTailCallKind());
+ break;
+ }
+
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end: {
if (CI->arg_size() != 2) {
@@ -6041,8 +6111,7 @@ std::string llvm::UpgradeDataLayoutString(StringRef DL, StringRef TT) {
Triple T(TT);
// The only data layout upgrades needed for pre-GCN, SPIR or SPIRV are setting
// the address space of globals to 1. This does not apply to SPIRV Logical.
- if (((T.isAMDGPU() && !T.isAMDGCN()) ||
- (T.isSPIR() || (T.isSPIRV() && !T.isSPIRVLogical()))) &&
+ if ((T.isSPIR() || (T.isSPIRV() && !T.isSPIRVLogical())) &&
!DL.contains("-G") && !DL.starts_with("G")) {
return DL.empty() ? std::string("G1") : (DL + "-G1").str();
}
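
With the pre-GCN AMDGPU case dropped from this branch, only SPIR and non-Logical SPIR-V triples take the early G1 path; AMDGPU triples now fall through to the dedicated block below. A small sketch of that predicate, with the helper name assumed for illustration:

    #include "llvm/TargetParser/Triple.h"
    using namespace llvm;

    // Illustrative only: which triples still take the early G1 shortcut.
    static bool takesSpirG1Path(const Triple &T) {
      return T.isSPIR() || (T.isSPIRV() && !T.isSPIRVLogical());
    }
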
@@ -6055,35 +6124,43 @@ std::string llvm::UpgradeDataLayoutString(StringRef DL, StringRef TT) {
return DL.str();
}
+ // AMDGPU data layout upgrades.
std::string Res = DL.str();
- // AMDGCN data layout upgrades.
- if (T.isAMDGCN()) {
+ if (T.isAMDGPU()) {
// Define address spaces for constants.
if (!DL.contains("-G") && !DL.starts_with("G"))
Res.append(Res.empty() ? "G1" : "-G1");
- // Add missing non-integral declarations.
- // This goes before adding new address spaces to prevent incoherent string
- // values.
- if (!DL.contains("-ni") && !DL.starts_with("ni"))
- Res.append("-ni:7:8:9");
- // Update ni:7 to ni:7:8:9.
- if (DL.ends_with("ni:7"))
- Res.append(":8:9");
- if (DL.ends_with("ni:7:8"))
- Res.append(":9");
-
- // Add sizing for address spaces 7 and 8 (fat raw buffers and buffer
- // resources) An empty data layout has already been upgraded to G1 by now.
- if (!DL.contains("-p7") && !DL.starts_with("p7"))
- Res.append("-p7:160:256:256:32");
- if (!DL.contains("-p8") && !DL.starts_with("p8"))
- Res.append("-p8:128:128:128:48");
- constexpr StringRef OldP8("-p8:128:128-");
- if (DL.contains(OldP8))
- Res.replace(Res.find(OldP8), OldP8.size(), "-p8:128:128:128:48-");
- if (!DL.contains("-p9") && !DL.starts_with("p9"))
- Res.append("-p9:192:256:256:32");
+ // AMDGCN data layout upgrades.
+ if (T.isAMDGCN()) {
+
+ // Add missing non-integral declarations.
+ // This goes before adding new address spaces to prevent incoherent string
+ // values.
+ if (!DL.contains("-ni") && !DL.starts_with("ni"))
+ Res.append("-ni:7:8:9");
+ // Update ni:7 to ni:7:8:9.
+ if (DL.ends_with("ni:7"))
+ Res.append(":8:9");
+ if (DL.ends_with("ni:7:8"))
+ Res.append(":9");
+
+ // Add sizing for address spaces 7 and 8 (fat raw buffers and buffer
+ // resources) An empty data layout has already been upgraded to G1 by now.
+ if (!DL.contains("-p7") && !DL.starts_with("p7"))
+ Res.append("-p7:160:256:256:32");
+ if (!DL.contains("-p8") && !DL.starts_with("p8"))
+ Res.append("-p8:128:128:128:48");
+ constexpr StringRef OldP8("-p8:128:128-");
+ if (DL.contains(OldP8))
+ Res.replace(Res.find(OldP8), OldP8.size(), "-p8:128:128:128:48-");
+ if (!DL.contains("-p9") && !DL.starts_with("p9"))
+ Res.append("-p9:192:256:256:32");
+ }
+
+ // Upgrade the ELF mangling mode.
+ if (!DL.contains("m:e"))
+ Res = Res.empty() ? "m:e" : "m:e-" + Res;
return Res;
}
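
Taken together, an amdgcn data layout upgraded through this path gains the G1 global address space, the ni:7:8:9 entry, the p7/p8/p9 sizings, and an m:e mangling prefix. A minimal sketch of invoking the upgrader directly; the input layout string and function name are assumptions made for illustration:

    #include "llvm/IR/AutoUpgrade.h"
    #include <string>

    // Illustrative only: per the logic above, the result gains "-G1",
    // "-ni:7:8:9", the p7/p8/p9 sizings, and an "m:e-" prefix.
    std::string upgradeAmdgcnLayout() {
      return llvm::UpgradeDataLayoutString("e-p:64:64", "amdgcn-amd-amdhsa");
    }
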