Diffstat (limited to 'llvm/lib/IR')
-rw-r--r--  llvm/lib/IR/AsmWriter.cpp            2
-rw-r--r--  llvm/lib/IR/AutoUpgrade.cpp         70
-rw-r--r--  llvm/lib/IR/IRBuilder.cpp           30
-rw-r--r--  llvm/lib/IR/Instructions.cpp         2
-rw-r--r--  llvm/lib/IR/Intrinsics.cpp          13
-rw-r--r--  llvm/lib/IR/ModuleSummaryIndex.cpp  12
-rw-r--r--  llvm/lib/IR/RuntimeLibcalls.cpp      3
-rw-r--r--  llvm/lib/IR/Verifier.cpp            27
8 files changed, 114 insertions, 45 deletions
diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp
index 3908a78..488b078 100644
--- a/llvm/lib/IR/AsmWriter.cpp
+++ b/llvm/lib/IR/AsmWriter.cpp
@@ -3196,7 +3196,7 @@ void AssemblyWriter::printModuleSummaryIndex() {
// for aliasee (then update BitcodeWriter.cpp and remove get/setAliaseeGUID).
for (auto &GlobalList : *TheIndex) {
auto GUID = GlobalList.first;
- for (auto &Summary : GlobalList.second.SummaryList)
+ for (auto &Summary : GlobalList.second.getSummaryList())
SummaryToGUIDMap[Summary.get()] = GUID;
}
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index d8374b6..10f915d 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -1426,6 +1426,28 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
Intrinsic::memset, ParamTypes);
return true;
}
+
+ unsigned MaskedID =
+ StringSwitch<unsigned>(Name)
+ .StartsWith("masked.load", Intrinsic::masked_load)
+ .StartsWith("masked.gather", Intrinsic::masked_gather)
+ .StartsWith("masked.store", Intrinsic::masked_store)
+ .StartsWith("masked.scatter", Intrinsic::masked_scatter)
+ .Default(0);
+ if (MaskedID && F->arg_size() == 4) {
+ rename(F);
+ if (MaskedID == Intrinsic::masked_load ||
+ MaskedID == Intrinsic::masked_gather) {
+ NewFn = Intrinsic::getOrInsertDeclaration(
+ F->getParent(), MaskedID,
+ {F->getReturnType(), F->getArg(0)->getType()});
+ return true;
+ }
+ NewFn = Intrinsic::getOrInsertDeclaration(
+ F->getParent(), MaskedID,
+ {F->getArg(0)->getType(), F->getArg(1)->getType()});
+ return true;
+ }
break;
}
case 'n': {
@@ -5231,6 +5253,54 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
break;
}
+ case Intrinsic::masked_load:
+ case Intrinsic::masked_gather:
+ case Intrinsic::masked_store:
+ case Intrinsic::masked_scatter: {
+ if (CI->arg_size() != 4) {
+ DefaultCase();
+ return;
+ }
+
+ const DataLayout &DL = CI->getDataLayout();
+ switch (NewFn->getIntrinsicID()) {
+ case Intrinsic::masked_load:
+ NewCall = Builder.CreateMaskedLoad(
+ CI->getType(), CI->getArgOperand(0),
+ cast<ConstantInt>(CI->getArgOperand(1))->getAlignValue(),
+ CI->getArgOperand(2), CI->getArgOperand(3));
+ break;
+ case Intrinsic::masked_gather:
+ NewCall = Builder.CreateMaskedGather(
+ CI->getType(), CI->getArgOperand(0),
+ DL.getValueOrABITypeAlignment(
+ cast<ConstantInt>(CI->getArgOperand(1))->getMaybeAlignValue(),
+ CI->getType()->getScalarType()),
+ CI->getArgOperand(2), CI->getArgOperand(3));
+ break;
+ case Intrinsic::masked_store:
+ NewCall = Builder.CreateMaskedStore(
+ CI->getArgOperand(0), CI->getArgOperand(1),
+ cast<ConstantInt>(CI->getArgOperand(2))->getAlignValue(),
+ CI->getArgOperand(3));
+ break;
+ case Intrinsic::masked_scatter:
+ NewCall = Builder.CreateMaskedScatter(
+ CI->getArgOperand(0), CI->getArgOperand(1),
+ DL.getValueOrABITypeAlignment(
+ cast<ConstantInt>(CI->getArgOperand(2))->getMaybeAlignValue(),
+ CI->getArgOperand(0)->getType()->getScalarType()),
+ CI->getArgOperand(3));
+ break;
+ default:
+ llvm_unreachable("Unexpected intrinsic ID");
+ }
+ // Previous metadata is still valid.
+ NewCall->copyMetadata(*CI);
+ NewCall->setTailCallKind(cast<CallInst>(CI)->getTailCallKind());
+ break;
+ }
+
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end: {
if (CI->arg_size() != 2) {
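
[Note] The second AutoUpgrade.cpp hunk above rebuilds old-style four-operand masked intrinsic calls through the new IRBuilder entry points, converting the immediate i32 alignment operand into an llvm::Align. The following is a minimal standalone sketch of that mapping for masked.load only; the helper name and the surrounding setup are illustrative and not part of this patch.

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instructions.h"
    #include <cassert>

    using namespace llvm;

    // Rewrites one old-style masked.load call into the new three-operand form.
    static CallInst *upgradeMaskedLoadCallSketch(CallInst *OldCI) {
      assert(OldCI->arg_size() == 4 && "expected old-style masked.load call");
      IRBuilder<> Builder(OldCI);

      // Old operand order: (ptr, i32 align, mask, passthru).
      Value *Ptr = OldCI->getArgOperand(0);
      Align Alignment =
          cast<ConstantInt>(OldCI->getArgOperand(1))->getAlignValue();
      Value *Mask = OldCI->getArgOperand(2);
      Value *PassThru = OldCI->getArgOperand(3);

      // New form: (ptr align N, mask, passthru); CreateMaskedLoad attaches the
      // alignment as a parameter attribute on the pointer.
      CallInst *NewCI = Builder.CreateMaskedLoad(OldCI->getType(), Ptr,
                                                 Alignment, Mask, PassThru);
      NewCI->copyMetadata(*OldCI);                       // metadata stays valid
      NewCI->setTailCallKind(OldCI->getTailCallKind());  // preserve tail kind
      OldCI->replaceAllUsesWith(NewCI);
      OldCI->eraseFromParent();
      return NewCI;
    }
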
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index 15c0198..88dbd17 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -495,9 +495,11 @@ CallInst *IRBuilderBase::CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment,
if (!PassThru)
PassThru = PoisonValue::get(Ty);
Type *OverloadedTypes[] = { Ty, PtrTy };
- Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
- return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
- OverloadedTypes, Name);
+ Value *Ops[] = {Ptr, Mask, PassThru};
+ CallInst *CI =
+ CreateMaskedIntrinsic(Intrinsic::masked_load, Ops, OverloadedTypes, Name);
+ CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), Alignment));
+ return CI;
}
/// Create a call to a Masked Store intrinsic.
@@ -513,8 +515,11 @@ CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
assert(DataTy->isVectorTy() && "Val should be a vector");
assert(Mask && "Mask should not be all-ones (null)");
Type *OverloadedTypes[] = { DataTy, PtrTy };
- Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
- return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
+ Value *Ops[] = {Val, Ptr, Mask};
+ CallInst *CI =
+ CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
+ CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), Alignment));
+ return CI;
}
/// Create a call to a Masked intrinsic, with given intrinsic Id,
@@ -552,12 +557,14 @@ CallInst *IRBuilderBase::CreateMaskedGather(Type *Ty, Value *Ptrs,
PassThru = PoisonValue::get(Ty);
Type *OverloadedTypes[] = {Ty, PtrsTy};
- Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};
+ Value *Ops[] = {Ptrs, Mask, PassThru};
// We specify only one type when we create this intrinsic. Types of other
// arguments are derived from this type.
- return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
- Name);
+ CallInst *CI = CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops,
+ OverloadedTypes, Name);
+ CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), Alignment));
+ return CI;
}
/// Create a call to a Masked Scatter intrinsic.
@@ -577,11 +584,14 @@ CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
Mask = getAllOnesMask(NumElts);
Type *OverloadedTypes[] = {DataTy, PtrsTy};
- Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};
+ Value *Ops[] = {Data, Ptrs, Mask};
// We specify only one type when we create this intrinsic. Types of other
// arguments are derived from this type.
- return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
+ CallInst *CI =
+ CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
+ CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), Alignment));
+ return CI;
}
/// Create a call to Masked Expand Load intrinsic
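
[Note] With the IRBuilder.cpp changes above, callers still pass an llvm::Align, but the builder now emits three-operand masked.load/masked.store calls and records the alignment via addParamAttr on the pointer operand (operand 0 for loads and gathers, operand 1 for stores and scatters). Below is a small self-contained sketch, with illustrative module and function names, of how a call site is built after this change; the printed call should show the alignment as an align parameter attribute on the pointer rather than as an i32 argument.

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      Module M("masked-intrinsic-example", Ctx);
      IRBuilder<> Builder(Ctx);

      // define void @f(ptr %p)
      auto *FnTy = FunctionType::get(Builder.getVoidTy(),
                                     {PointerType::getUnqual(Ctx)}, false);
      Function *F = Function::Create(FnTy, Function::ExternalLinkage, "f", M);
      Builder.SetInsertPoint(BasicBlock::Create(Ctx, "entry", F));

      auto *VecTy = FixedVectorType::get(Builder.getInt32Ty(), 4);
      Value *Mask = Constant::getAllOnesValue(
          FixedVectorType::get(Builder.getInt1Ty(), 4));

      // Alignment is passed as an llvm::Align and ends up on the pointer
      // operand instead of being materialized as a separate i32 operand.
      Value *Loaded =
          Builder.CreateMaskedLoad(VecTy, F->getArg(0), Align(16), Mask);
      Builder.CreateMaskedStore(Loaded, F->getArg(0), Align(16), Mask);

      Builder.CreateRetVoid();
      M.print(outs(), nullptr);
      return 0;
    }
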
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 9060a89..3b8fde8 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -2878,7 +2878,7 @@ unsigned CastInst::isEliminableCastPair(Instruction::CastOps firstOp,
{ 99,99,99, 0, 0,99,99, 0, 0,99,99,99, 4, 0}, // FPTrunc |
{ 99,99,99, 2, 2,99,99, 8, 2,99,99,99, 4, 0}, // FPExt |
{ 1, 0, 0,99,99, 0, 0,99,99,99,99, 7, 3, 0}, // PtrToInt |
- { 1, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // PtrToAddr |
+ { 0, 0, 0,99,99, 0, 0,99,99,99,99, 0, 3, 0}, // PtrToAddr |
{ 99,99,99,99,99,99,99,99,99,11,11,99,15, 0}, // IntToPtr |
{ 5, 5, 5, 0, 0, 5, 5, 0, 0,16,16, 5, 1,14}, // BitCast |
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
diff --git a/llvm/lib/IR/Intrinsics.cpp b/llvm/lib/IR/Intrinsics.cpp
index 6797a10..526800e 100644
--- a/llvm/lib/IR/Intrinsics.cpp
+++ b/llvm/lib/IR/Intrinsics.cpp
@@ -725,6 +725,19 @@ Function *Intrinsic::getOrInsertDeclaration(Module *M, ID id,
// There can never be multiple globals with the same name of different types,
// because intrinsics must be a specific type.
auto *FT = getType(M->getContext(), id, Tys);
+ Function *F = cast<Function>(
+ M->getOrInsertFunction(
+ Tys.empty() ? getName(id) : getName(id, Tys, M, FT), FT)
+ .getCallee());
+ if (F->getFunctionType() == FT)
+ return F;
+
+ // It's possible that a declaration for this intrinsic already exists with an
+ // incorrect signature, if the signature has changed, but this particular
+ // declaration has not been auto-upgraded yet. In that case, rename the
+ // invalid declaration and insert a new one with the correct signature. The
+ // invalid declaration will get upgraded later.
+ F->setName(F->getName() + ".invalid");
return cast<Function>(
M->getOrInsertFunction(
Tys.empty() ? getName(id) : getName(id, Tys, M, FT), FT)
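
[Note] The Intrinsics.cpp hunk above covers the case where a module still carries a declaration under the intrinsic's mangled name but with the pre-upgrade signature. The hedged sketch below, using example types and the presumed v4i32.p0 mangling, walks through that scenario: the stale declaration is renamed with an ".invalid" suffix and a fresh declaration with the current three-argument type is returned, leaving the renamed one for auto-upgrade.

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    void demoStaleDeclaration(Module &M) {
      LLVMContext &Ctx = M.getContext();
      auto *VecTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
      auto *MaskTy = FixedVectorType::get(Type::getInt1Ty(Ctx), 4);
      auto *PtrTy = PointerType::getUnqual(Ctx);

      // Hand-craft a declaration with the old 4-argument signature
      // (ptr, i32 align, mask, passthru), as stale bitcode might contain.
      auto *OldFnTy = FunctionType::get(
          VecTy, {PtrTy, Type::getInt32Ty(Ctx), MaskTy, VecTy}, false);
      Function::Create(OldFnTy, Function::ExternalLinkage,
                       "llvm.masked.load.v4i32.p0", M);

      // The declaration returned here has the new 3-argument type; the stale
      // one is now named "llvm.masked.load.v4i32.p0.invalid" and will be
      // picked up by auto-upgrade later.
      Function *NewDecl = Intrinsic::getOrInsertDeclaration(
          &M, Intrinsic::masked_load, {VecTy, PtrTy});
      (void)NewDecl;
    }
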
diff --git a/llvm/lib/IR/ModuleSummaryIndex.cpp b/llvm/lib/IR/ModuleSummaryIndex.cpp
index dc55b63..a6353664 100644
--- a/llvm/lib/IR/ModuleSummaryIndex.cpp
+++ b/llvm/lib/IR/ModuleSummaryIndex.cpp
@@ -162,7 +162,7 @@ void ModuleSummaryIndex::collectDefinedFunctionsForModule(
StringRef ModulePath, GVSummaryMapTy &GVSummaryMap) const {
for (auto &GlobalList : *this) {
auto GUID = GlobalList.first;
- for (auto &GlobSummary : GlobalList.second.SummaryList) {
+ for (auto &GlobSummary : GlobalList.second.getSummaryList()) {
auto *Summary = dyn_cast_or_null<FunctionSummary>(GlobSummary.get());
if (!Summary)
// Ignore global variable, focus on functions
@@ -263,7 +263,7 @@ void ModuleSummaryIndex::propagateAttributes(
DenseSet<ValueInfo> MarkedNonReadWriteOnly;
for (auto &P : *this) {
bool IsDSOLocal = true;
- for (auto &S : P.second.SummaryList) {
+ for (auto &S : P.second.getSummaryList()) {
if (!isGlobalValueLive(S.get())) {
// computeDeadSymbolsAndUpdateIndirectCalls should have marked all
// copies live. Note that it is possible that there is a GUID collision
@@ -273,7 +273,7 @@ void ModuleSummaryIndex::propagateAttributes(
// all copies live we can assert here that all are dead if any copy is
// dead.
assert(llvm::none_of(
- P.second.SummaryList,
+ P.second.getSummaryList(),
[&](const std::unique_ptr<GlobalValueSummary> &Summary) {
return isGlobalValueLive(Summary.get());
}));
@@ -308,16 +308,16 @@ void ModuleSummaryIndex::propagateAttributes(
// Mark the flag in all summaries false so that we can do quick check
// without going through the whole list.
for (const std::unique_ptr<GlobalValueSummary> &Summary :
- P.second.SummaryList)
+ P.second.getSummaryList())
Summary->setDSOLocal(false);
}
setWithAttributePropagation();
setWithDSOLocalPropagation();
if (llvm::AreStatisticsEnabled())
for (auto &P : *this)
- if (P.second.SummaryList.size())
+ if (P.second.getSummaryList().size())
if (auto *GVS = dyn_cast<GlobalVarSummary>(
- P.second.SummaryList[0]->getBaseObject()))
+ P.second.getSummaryList()[0]->getBaseObject()))
if (isGlobalValueLive(GVS)) {
if (GVS->maybeReadOnly())
ReadOnlyLiveGVars++;
diff --git a/llvm/lib/IR/RuntimeLibcalls.cpp b/llvm/lib/IR/RuntimeLibcalls.cpp
index 7ea2e46..77af29b 100644
--- a/llvm/lib/IR/RuntimeLibcalls.cpp
+++ b/llvm/lib/IR/RuntimeLibcalls.cpp
@@ -21,9 +21,6 @@ using namespace RTLIB;
#define GET_SET_TARGET_RUNTIME_LIBCALL_SETS
#define DEFINE_GET_LOOKUP_LIBCALL_IMPL_NAME
#include "llvm/IR/RuntimeLibcalls.inc"
-#undef GET_INIT_RUNTIME_LIBCALL_NAMES
-#undef GET_SET_TARGET_RUNTIME_LIBCALL_SETS
-#undef DEFINE_GET_LOOKUP_LIBCALL_IMPL_NAME
/// Set default libcall names. If a target wants to opt-out of a libcall it
/// should be placed here.
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 3572852..03da154 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -6211,13 +6211,10 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
Check(Call.getType()->isVectorTy(), "masked_load: must return a vector",
Call);
- ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
- Value *Mask = Call.getArgOperand(2);
- Value *PassThru = Call.getArgOperand(3);
+ Value *Mask = Call.getArgOperand(1);
+ Value *PassThru = Call.getArgOperand(2);
Check(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
Call);
- Check(Alignment->getValue().isPowerOf2(),
- "masked_load: alignment must be a power of 2", Call);
Check(PassThru->getType() == Call.getType(),
"masked_load: pass through and return type must match", Call);
Check(cast<VectorType>(Mask->getType())->getElementCount() ==
@@ -6227,33 +6224,15 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
}
case Intrinsic::masked_store: {
Value *Val = Call.getArgOperand(0);
- ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
- Value *Mask = Call.getArgOperand(3);
+ Value *Mask = Call.getArgOperand(2);
Check(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
Call);
- Check(Alignment->getValue().isPowerOf2(),
- "masked_store: alignment must be a power of 2", Call);
Check(cast<VectorType>(Mask->getType())->getElementCount() ==
cast<VectorType>(Val->getType())->getElementCount(),
"masked_store: vector mask must be same length as value", Call);
break;
}
- case Intrinsic::masked_gather: {
- const APInt &Alignment =
- cast<ConstantInt>(Call.getArgOperand(1))->getValue();
- Check(Alignment.isZero() || Alignment.isPowerOf2(),
- "masked_gather: alignment must be 0 or a power of 2", Call);
- break;
- }
- case Intrinsic::masked_scatter: {
- const APInt &Alignment =
- cast<ConstantInt>(Call.getArgOperand(2))->getValue();
- Check(Alignment.isZero() || Alignment.isPowerOf2(),
- "masked_scatter: alignment must be 0 or a power of 2", Call);
- break;
- }
-
case Intrinsic::experimental_guard: {
Check(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
Check(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,