path: root/llvm
author     Guillaume Chatelet <gchatelet@google.com>   2020-07-07 09:54:13 +0000
committer  Guillaume Chatelet <gchatelet@google.com>   2020-07-07 09:54:13 +0000
commit     74c723757e69fbe7d85e42527d07b728113699ae (patch)
tree       8bcada75f6dfc652e6fa090318d67149bb282442 /llvm
parent     2cdf108d329bda280948ad634aa0a070337a5f88 (diff)
[NFC] Adding the align attribute on Atomic{CmpXchg|RMW}Inst
This is the first step to add support for the align attribute to AtomicRMWInst
and AtomicCmpXchgInst. Next step is to add support in IRBuilder and
BitcodeReader.

Bug: https://bugs.llvm.org/show_bug.cgi?id=27168
Differential Revision: https://reviews.llvm.org/D83136
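For context, the constructors that gain the Align parameter can be called as in
the minimal sketch below. This is only an illustration against the post-patch
headers; the function name buildAtomicsExample, the choice of Add as the RMW
operation, and the orderings are invented for the example. The alignment is
derived from the DataLayout's type store size, i.e. the same natural alignment
that the removed getAlign() implementations used to compute.

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/Alignment.h"

    using namespace llvm;

    // Illustrative helper (not part of the patch): build an atomicrmw and a
    // cmpxchg with an explicit alignment, computed the same way LLParser and
    // BitcodeReader do in this change.
    void buildAtomicsExample(Module &M, BasicBlock *BB, Value *Ptr, Value *Val,
                             Value *Cmp, Value *New) {
      const DataLayout &DL = M.getDataLayout();
      Align RMWAlign(DL.getTypeStoreSize(Val->getType()));
      Align CXIAlign(DL.getTypeStoreSize(Cmp->getType()));

      // New signature: BinOp, Ptr, Val, Align, Ordering, SSID, insertion point.
      auto *RMW = new AtomicRMWInst(AtomicRMWInst::Add, Ptr, Val, RMWAlign,
                                    AtomicOrdering::SequentiallyConsistent,
                                    SyncScope::System, BB);

      // New signature: Ptr, Cmp, New, Align, SuccessOrdering, FailureOrdering,
      // SSID, insertion point.
      auto *CXI = new AtomicCmpXchgInst(Ptr, Cmp, New, CXIAlign,
                                        AtomicOrdering::SequentiallyConsistent,
                                        AtomicOrdering::Monotonic,
                                        SyncScope::System, BB);
      (void)RMW;
      (void)CXI;
    }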
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/include/llvm/IR/IRBuilder.h              | 18
-rw-r--r--  llvm/include/llvm/IR/Instructions.h           | 87
-rw-r--r--  llvm/lib/AsmParser/LLParser.cpp               | 13
-rw-r--r--  llvm/lib/Bitcode/Reader/BitcodeReader.cpp     | 10
-rw-r--r--  llvm/lib/IR/Instructions.cpp                  | 65
-rw-r--r--  llvm/unittests/Analysis/AliasAnalysisTest.cpp |  9
6 files changed, 110 insertions, 92 deletions
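Note that the alignment is not stored as a byte count: as the Instructions.h
hunks below show, setAlignment() packs only Log2(Align) into the AlignmentField
of the instruction's subclass data (a 5-bit field, per the "Next bit:13"
comments), and getAlign() rebuilds the value with a shift. A stand-alone sketch
of that encoding, in plain C++ with no LLVM types and helper names invented for
the example:

    #include <cassert>
    #include <cstdint>

    // setAlignment(Align) stores Log2(Align) in the bitfield...
    static unsigned encodeAlignField(uint64_t AlignInBytes) {
      assert(AlignInBytes != 0 && (AlignInBytes & (AlignInBytes - 1)) == 0 &&
             "alignment must be a non-zero power of two");
      unsigned Log2 = 0;
      while ((1ULL << Log2) < AlignInBytes)
        ++Log2;
      return Log2;                 // value kept in AlignmentField
    }

    // ...and getAlign() reconstructs the alignment with a shift.
    static uint64_t decodeAlignField(unsigned FieldValue) {
      return 1ULL << FieldValue;
    }

    int main() {
      assert(encodeAlignField(8) == 3);   // 8-byte alignment -> field value 3
      assert(decodeAlignField(3) == 8);   // round-trips back to 8 bytes
      assert(decodeAlignField(encodeAlignField(1)) == 1);
      return 0;
    }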
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index ec042f0..ffec4ff 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -1733,19 +1733,21 @@ public:
return Insert(new FenceInst(Context, Ordering, SSID), Name);
}
- AtomicCmpXchgInst *
- CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New,
- AtomicOrdering SuccessOrdering,
- AtomicOrdering FailureOrdering,
- SyncScope::ID SSID = SyncScope::System) {
- return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering,
- FailureOrdering, SSID));
+ AtomicCmpXchgInst *CreateAtomicCmpXchg(
+ Value *Ptr, Value *Cmp, Value *New, AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering, SyncScope::ID SSID = SyncScope::System) {
+ const DataLayout &DL = BB->getModule()->getDataLayout();
+ Align Alignment(DL.getTypeStoreSize(New->getType()));
+ return Insert(new AtomicCmpXchgInst(
+ Ptr, Cmp, New, Alignment, SuccessOrdering, FailureOrdering, SSID));
}
AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val,
AtomicOrdering Ordering,
SyncScope::ID SSID = SyncScope::System) {
- return Insert(new AtomicRMWInst(Op, Ptr, Val, Ordering, SSID));
+ const DataLayout &DL = BB->getModule()->getDataLayout();
+ Align Alignment(DL.getTypeStoreSize(Val->getType()));
+ return Insert(new AtomicRMWInst(Op, Ptr, Val, Alignment, Ordering, SSID));
}
Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList,
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
index 57ad0db..7119b13 100644
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -513,10 +513,15 @@ private:
/// failure (false) as second element.
///
class AtomicCmpXchgInst : public Instruction {
- void Init(Value *Ptr, Value *Cmp, Value *NewVal,
+ void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
SyncScope::ID SSID);
+ template <unsigned Offset>
+ using AtomicOrderingBitfieldElement =
+ typename Bitfield::Element<AtomicOrdering, Offset, 3,
+ AtomicOrdering::LAST>;
+
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
@@ -524,34 +529,35 @@ protected:
AtomicCmpXchgInst *cloneImpl() const;
public:
- AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
+ AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
AtomicOrdering SuccessOrdering,
- AtomicOrdering FailureOrdering,
- SyncScope::ID SSID, Instruction *InsertBefore = nullptr);
- AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
+ AtomicOrdering FailureOrdering, SyncScope::ID SSID,
+ Instruction *InsertBefore = nullptr);
+ AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
AtomicOrdering SuccessOrdering,
- AtomicOrdering FailureOrdering,
- SyncScope::ID SSID, BasicBlock *InsertAtEnd);
+ AtomicOrdering FailureOrdering, SyncScope::ID SSID,
+ BasicBlock *InsertAtEnd);
// allocate space for exactly three operands
void *operator new(size_t s) {
return User::operator new(s, 3);
}
- // FIXME: Reuse bit 1 that was used by `syncscope.`
- using VolatileField = Bitfield::Element<bool, 0, 1>; // Next bit:1
- using SuccessOrderingField =
- Bitfield::Element<AtomicOrdering, 2, 3,
- AtomicOrdering::LAST>; // Next bit:5
- using FailureOrderingField =
- Bitfield::Element<AtomicOrdering, 5, 3,
- AtomicOrdering::LAST>; // Next bit:8
- using WeakField = Bitfield::Element<bool, 8, 1>; // Next bit:9
+ using VolatileField = Bitfield::Element<bool, 0, 1>; // Next bit:1
+ using WeakField = Bitfield::Element<bool, 1, 1>; // Next bit:2
+ using SuccessOrderingField = AtomicOrderingBitfieldElement<2>; // Next bit:5
+ using FailureOrderingField = AtomicOrderingBitfieldElement<5>; // Next bit:8
+ using AlignmentField = AlignmentBitfieldElement<8>; // Next bit:13
- /// Always returns the natural type alignment.
- /// FIXME: Introduce a proper alignment
- /// https://bugs.llvm.org/show_bug.cgi?id=27168
- Align getAlign() const;
+ /// Return the alignment of the memory that is being allocated by the
+ /// instruction.
+ Align getAlign() const {
+ return Align(1ULL << getSubclassData<AlignmentField>());
+ }
+
+ void setAlignment(Align Align) {
+ setSubclassData<AlignmentField>(Log2(Align));
+ }
/// Return true if this is a cmpxchg from a volatile memory
/// location.
@@ -726,10 +732,21 @@ public:
BAD_BINOP
};
- AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
+private:
+ template <unsigned Offset>
+ using AtomicOrderingBitfieldElement =
+ typename Bitfield::Element<AtomicOrdering, Offset, 3,
+ AtomicOrdering::LAST>;
+
+ template <unsigned Offset>
+ using BinOpBitfieldElement =
+ typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;
+
+public:
+ AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
AtomicOrdering Ordering, SyncScope::ID SSID,
Instruction *InsertBefore = nullptr);
- AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
+ AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
AtomicOrdering Ordering, SyncScope::ID SSID,
BasicBlock *InsertAtEnd);
@@ -738,13 +755,10 @@ public:
return User::operator new(s, 2);
}
- // FIXME: Reuse bit 1 that was used by `syncscope.`
- using VolatileField = Bitfield::Element<bool, 0, 1>; // Next bit:1
- using AtomicOrderingField =
- Bitfield::Element<AtomicOrdering, 2, 3,
- AtomicOrdering::LAST>; // Next bit:5
- using OperationField = Bitfield::Element<BinOp, 5, 4,
- BinOp::LAST_BINOP>; // Next bit:9
+ using VolatileField = Bitfield::Element<bool, 0, 1>; // Next bit:1
+ using AtomicOrderingField = AtomicOrderingBitfieldElement<1>; // Next bit:4
+ using OperationField = BinOpBitfieldElement<4>; // Next bit:8
+ using AlignmentField = AlignmentBitfieldElement<8>; // Next bit:13
BinOp getOperation() const { return getSubclassData<OperationField>(); }
@@ -764,10 +778,15 @@ public:
setSubclassData<OperationField>(Operation);
}
- /// Always returns the natural type alignment.
- /// FIXME: Introduce a proper alignment
- /// https://bugs.llvm.org/show_bug.cgi?id=27168
- Align getAlign() const;
+ /// Return the alignment of the memory that is being allocated by the
+ /// instruction.
+ Align getAlign() const {
+ return Align(1ULL << getSubclassData<AlignmentField>());
+ }
+
+ void setAlignment(Align Align) {
+ setSubclassData<AlignmentField>(Log2(Align));
+ }
/// Return true if this is a RMW on a volatile memory location.
///
@@ -827,7 +846,7 @@ public:
}
private:
- void Init(BinOp Operation, Value *Ptr, Value *Val,
+ void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
AtomicOrdering Ordering, SyncScope::ID SSID);
// Shadow Instruction::setInstructionSubclassData with a private forwarding
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index db4fbfd..85105f2 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -7209,8 +7209,13 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
return Error(NewLoc, "new value and pointer type do not match");
if (!New->getType()->isFirstClassType())
return Error(NewLoc, "cmpxchg operand must be a first class value");
+
+ Align Alignment(
+ PFS.getFunction().getParent()->getDataLayout().getTypeStoreSize(
+ Cmp->getType()));
+
AtomicCmpXchgInst *CXI = new AtomicCmpXchgInst(
- Ptr, Cmp, New, SuccessOrdering, FailureOrdering, SSID);
+ Ptr, Cmp, New, Alignment, SuccessOrdering, FailureOrdering, SSID);
CXI->setVolatile(isVolatile);
CXI->setWeak(isWeak);
Inst = CXI;
@@ -7294,9 +7299,11 @@ int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
if (Size < 8 || (Size & (Size - 1)))
return Error(ValLoc, "atomicrmw operand must be power-of-two byte-sized"
" integer");
-
+ Align Alignment(
+ PFS.getFunction().getParent()->getDataLayout().getTypeStoreSize(
+ Val->getType()));
AtomicRMWInst *RMWI =
- new AtomicRMWInst(Operation, Ptr, Val, Ordering, SSID);
+ new AtomicRMWInst(Operation, Ptr, Val, Alignment, Ordering, SSID);
RMWI->setVolatile(isVolatile);
Inst = RMWI;
return AteExtraComma ? InstExtraComma : InstNormal;
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 4471302..dceb492 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -5020,8 +5020,10 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
else
FailureOrdering = getDecodedOrdering(Record[OpNum + 3]);
- I = new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering, FailureOrdering,
- SSID);
+ Align Alignment(
+ TheModule->getDataLayout().getTypeStoreSize(Cmp->getType()));
+ I = new AtomicCmpXchgInst(Ptr, Cmp, New, Alignment, SuccessOrdering,
+ FailureOrdering, SSID);
FullTy = StructType::get(Context, {FullTy, Type::getInt1Ty(Context)});
cast<AtomicCmpXchgInst>(I)->setVolatile(Record[OpNum]);
@@ -5058,7 +5060,9 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
Ordering == AtomicOrdering::Unordered)
return error("Invalid record");
SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 3]);
- I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SSID);
+ Align Alignment(
+ TheModule->getDataLayout().getTypeStoreSize(Val->getType()));
+ I = new AtomicRMWInst(Operation, Ptr, Val, Alignment, Ordering, SSID);
FullTy = getPointerElementFlatType(FullTy);
cast<AtomicRMWInst>(I)->setVolatile(Record[OpNum+1]);
InstructionList.push_back(I);
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index e22f609..f650ad9 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -1479,7 +1479,7 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
//===----------------------------------------------------------------------===//
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
- AtomicOrdering SuccessOrdering,
+ Align Alignment, AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
SyncScope::ID SSID) {
Op<0>() = Ptr;
@@ -1488,6 +1488,7 @@ void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
setSuccessOrdering(SuccessOrdering);
setFailureOrdering(FailureOrdering);
setSyncScopeID(SSID);
+ setAlignment(Alignment);
assert(getOperand(0) && getOperand(1) && getOperand(2) &&
"All operands must be non-null!");
@@ -1512,6 +1513,7 @@ void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
}
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
+ Align Alignment,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
SyncScope::ID SSID,
@@ -1520,10 +1522,11 @@ AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
- Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
+ Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
+ Align Alignment,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
SyncScope::ID SSID,
@@ -1532,14 +1535,7 @@ AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
- Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
-}
-
-Align AtomicCmpXchgInst::getAlign() const {
- // The default here is to assume it has NATURAL alignment, not
- // DataLayout-specified alignment.
- const DataLayout &DL = getModule()->getDataLayout();
- return Align(DL.getTypeStoreSize(getCompareOperand()->getType()));
+ Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
}
//===----------------------------------------------------------------------===//
@@ -1547,13 +1543,14 @@ Align AtomicCmpXchgInst::getAlign() const {
//===----------------------------------------------------------------------===//
void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
- AtomicOrdering Ordering,
+ Align Alignment, AtomicOrdering Ordering,
SyncScope::ID SSID) {
Op<0>() = Ptr;
Op<1>() = Val;
setOperation(Operation);
setOrdering(Ordering);
setSyncScopeID(SSID);
+ setAlignment(Alignment);
assert(getOperand(0) && getOperand(1) &&
"All operands must be non-null!");
@@ -1567,25 +1564,21 @@ void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
}
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
- AtomicOrdering Ordering,
- SyncScope::ID SSID,
- Instruction *InsertBefore)
- : Instruction(Val->getType(), AtomicRMW,
- OperandTraits<AtomicRMWInst>::op_begin(this),
- OperandTraits<AtomicRMWInst>::operands(this),
- InsertBefore) {
- Init(Operation, Ptr, Val, Ordering, SSID);
+ Align Alignment, AtomicOrdering Ordering,
+ SyncScope::ID SSID, Instruction *InsertBefore)
+ : Instruction(Val->getType(), AtomicRMW,
+ OperandTraits<AtomicRMWInst>::op_begin(this),
+ OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
+ Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
- AtomicOrdering Ordering,
- SyncScope::ID SSID,
- BasicBlock *InsertAtEnd)
- : Instruction(Val->getType(), AtomicRMW,
- OperandTraits<AtomicRMWInst>::op_begin(this),
- OperandTraits<AtomicRMWInst>::operands(this),
- InsertAtEnd) {
- Init(Operation, Ptr, Val, Ordering, SSID);
+ Align Alignment, AtomicOrdering Ordering,
+ SyncScope::ID SSID, BasicBlock *InsertAtEnd)
+ : Instruction(Val->getType(), AtomicRMW,
+ OperandTraits<AtomicRMWInst>::op_begin(this),
+ OperandTraits<AtomicRMWInst>::operands(this), InsertAtEnd) {
+ Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
}
StringRef AtomicRMWInst::getOperationName(BinOp Op) {
@@ -1623,13 +1616,6 @@ StringRef AtomicRMWInst::getOperationName(BinOp Op) {
llvm_unreachable("invalid atomicrmw operation");
}
-Align AtomicRMWInst::getAlign() const {
- // The default here is to assume it has NATURAL alignment, not
- // DataLayout-specified alignment.
- const DataLayout &DL = getModule()->getDataLayout();
- return Align(DL.getTypeStoreSize(getValOperand()->getType()));
-}
-
//===----------------------------------------------------------------------===//
// FenceInst Implementation
//===----------------------------------------------------------------------===//
@@ -4282,10 +4268,9 @@ StoreInst *StoreInst::cloneImpl() const {
}
AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
- AtomicCmpXchgInst *Result =
- new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
- getSuccessOrdering(), getFailureOrdering(),
- getSyncScopeID());
+ AtomicCmpXchgInst *Result = new AtomicCmpXchgInst(
+ getOperand(0), getOperand(1), getOperand(2), getAlign(),
+ getSuccessOrdering(), getFailureOrdering(), getSyncScopeID());
Result->setVolatile(isVolatile());
Result->setWeak(isWeak());
return Result;
@@ -4293,8 +4278,8 @@ AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
AtomicRMWInst *Result =
- new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
- getOrdering(), getSyncScopeID());
+ new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
+ getAlign(), getOrdering(), getSyncScopeID());
Result->setVolatile(isVolatile());
return Result;
}
diff --git a/llvm/unittests/Analysis/AliasAnalysisTest.cpp b/llvm/unittests/Analysis/AliasAnalysisTest.cpp
index 83f4c248..6f0f6f5 100644
--- a/llvm/unittests/Analysis/AliasAnalysisTest.cpp
+++ b/llvm/unittests/Analysis/AliasAnalysisTest.cpp
@@ -174,6 +174,7 @@ TEST_F(AliasAnalysisTest, getModRefInfo) {
auto PtrType = Type::getInt32PtrTy(C);
auto *Value = ConstantInt::get(IntType, 42);
auto *Addr = ConstantPointerNull::get(PtrType);
+ auto Alignment = Align(IntType->getBitWidth() / 8);
auto *Store1 = new StoreInst(Value, Addr, BB);
auto *Load1 = new LoadInst(IntType, Addr, "load", BB);
@@ -181,11 +182,11 @@ TEST_F(AliasAnalysisTest, getModRefInfo) {
auto *VAArg1 = new VAArgInst(Addr, PtrType, "vaarg", BB);
auto *CmpXChg1 = new AtomicCmpXchgInst(
Addr, ConstantInt::get(IntType, 0), ConstantInt::get(IntType, 1),
- AtomicOrdering::Monotonic, AtomicOrdering::Monotonic,
+ Alignment, AtomicOrdering::Monotonic, AtomicOrdering::Monotonic,
SyncScope::System, BB);
- auto *AtomicRMW =
- new AtomicRMWInst(AtomicRMWInst::Xchg, Addr, ConstantInt::get(IntType, 1),
- AtomicOrdering::Monotonic, SyncScope::System, BB);
+ auto *AtomicRMW = new AtomicRMWInst(
+ AtomicRMWInst::Xchg, Addr, ConstantInt::get(IntType, 1), Alignment,
+ AtomicOrdering::Monotonic, SyncScope::System, BB);
ReturnInst::Create(C, nullptr, BB);