author    Alexander Timofeev <Alexander.Timofeev@amd.com>  2017-06-15 19:33:10 +0000
committer Alexander Timofeev <Alexander.Timofeev@amd.com>  2017-06-15 19:33:10 +0000
commit    0f9c84cd936b6532495673613d9390efdc85cb3c (patch)
tree      f21751f18521ab02ca6f4075169926ff7399333a
parent    f2d3e6d3d50ea7f16ffaf12bfbb03384d019d3b2 (diff)
DivergenceAnalysis: add a TargetTransformInfo hook for operations that are always uniform

Some target operations produce a uniform result even when their arguments are
divergent. On AMDGPU, the amdgcn.readfirstlane and amdgcn.readlane intrinsics
broadcast a single lane's value to the whole wavefront. Add
TTI::isAlwaysUniform() so targets can report such operations, and stop
DivergenceAnalysis from propagating divergence through their users. Also make
the clobber check in AMDGPUAnnotateUniformValues scan the load's whole block
when the load sits inside a loop, because a store placed after the load still
clobbers it on the next iteration.
llvm-svn: 305494
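To illustrate the effect, here is a minimal IR sketch (illustrative only, not
part of the patch; it mirrors the new always-uniform.ll test below). The
workitem id is divergent, but the readfirstlane result is uniform by
definition, so the address derived from it can be proven uniform:

; Hypothetical example for illustration; function and value names are invented.
declare i32 @llvm.amdgcn.workitem.id.x()
declare i32 @llvm.amdgcn.readfirstlane(i32)

define amdgpu_kernel void @sketch(i32 addrspace(1)* %in, i32 addrspace(1)* %out) {
  %tid   = call i32 @llvm.amdgcn.workitem.id.x()          ; divergent: varies per lane
  %lane0 = call i32 @llvm.amdgcn.readfirstlane(i32 %tid)  ; uniform: same in every lane
  %idx   = zext i32 %lane0 to i64
  %gep   = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %idx
  ; With isAlwaysUniform, DivergenceAnalysis treats %lane0, %idx and %gep
  ; as uniform, so the backend may select a scalar (s_load_dword) load.
  %val   = load i32, i32 addrspace(1)* %gep, align 4
  store i32 %val, i32 addrspace(1)* %out, align 4
  ret void
}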
-rw-r--r--  llvm/include/llvm/Analysis/TargetTransformInfo.h        10
-rw-r--r--  llvm/include/llvm/Analysis/TargetTransformInfoImpl.h     2
-rw-r--r--  llvm/include/llvm/CodeGen/BasicTTIImpl.h                 2
-rw-r--r--  llvm/lib/Analysis/DivergenceAnalysis.cpp                 2
-rw-r--r--  llvm/lib/Analysis/TargetTransformInfo.cpp                4
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp   2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp    13
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h       1
-rw-r--r--  llvm/test/CodeGen/AMDGPU/always-uniform.ll              21
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global_smrd_cfg.ll             33
10 files changed, 88 insertions, 2 deletions
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 471b864..af2ebb7 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -235,6 +235,11 @@ public:
/// starting with the sources of divergence.
bool isSourceOfDivergence(const Value *V) const;
+ /// \brief Returns true for the target-specific set of operations
+ /// which produce a uniform result even when taking non-uniform
+ /// arguments.
+ bool isAlwaysUniform(const Value *V) const;
+
/// Returns the address space ID for a target's 'flat' address space. Note
/// this is not necessarily the same as addrspace(0), which LLVM sometimes
/// refers to as the generic address space. The flat address space is a
@@ -821,6 +826,7 @@ public:
virtual int getUserCost(const User *U) = 0;
virtual bool hasBranchDivergence() = 0;
virtual bool isSourceOfDivergence(const Value *V) = 0;
+ virtual bool isAlwaysUniform(const Value *V) = 0;
virtual unsigned getFlatAddressSpace() = 0;
virtual bool isLoweredToCall(const Function *F) = 0;
virtual void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) = 0;
@@ -998,6 +1004,10 @@ public:
return Impl.isSourceOfDivergence(V);
}
+ bool isAlwaysUniform(const Value *V) override {
+ return Impl.isAlwaysUniform(V);
+ }
+
unsigned getFlatAddressSpace() override {
return Impl.getFlatAddressSpace();
}
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 7884ffe..24ac3b12 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -177,6 +177,8 @@ public:
bool isSourceOfDivergence(const Value *V) { return false; }
+ bool isAlwaysUniform(const Value *V) { return false; }
+
unsigned getFlatAddressSpace () {
return -1;
}
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index 75277cd..5eb7a0f 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -93,6 +93,8 @@ public:
bool isSourceOfDivergence(const Value *V) { return false; }
+ bool isAlwaysUniform(const Value *V) { return false; }
+
unsigned getFlatAddressSpace() {
// Return an invalid address space.
return -1;
diff --git a/llvm/lib/Analysis/DivergenceAnalysis.cpp b/llvm/lib/Analysis/DivergenceAnalysis.cpp
index 1b36569..2d39a0b 100644
--- a/llvm/lib/Analysis/DivergenceAnalysis.cpp
+++ b/llvm/lib/Analysis/DivergenceAnalysis.cpp
@@ -241,7 +241,7 @@ void DivergencePropagator::exploreDataDependency(Value *V) {
// Follow def-use chains of V.
for (User *U : V->users()) {
Instruction *UserInst = cast<Instruction>(U);
- if (DV.insert(UserInst).second)
+ if (!TTI.isAlwaysUniform(U) && DV.insert(UserInst).second)
Worklist.push_back(UserInst);
}
}
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 488cb33..92328f6 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -103,6 +103,10 @@ bool TargetTransformInfo::isSourceOfDivergence(const Value *V) const {
return TTIImpl->isSourceOfDivergence(V);
}
+bool TargetTransformInfo::isAlwaysUniform(const Value *V) const {
+ return TTIImpl->isAlwaysUniform(V);
+}
+
unsigned TargetTransformInfo::getFlatAddressSpace() const {
return TTIImpl->getFlatAddressSpace();
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
index 3c788fa..6f00286 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
@@ -107,7 +107,7 @@ bool AMDGPUAnnotateUniformValues::isClobberedInFunction(LoadInst * Load) {
DFS(Start, Checklist);
for (auto &BB : Checklist) {
- BasicBlock::iterator StartIt = (BB == Load->getParent()) ?
+ BasicBlock::iterator StartIt = (!L && (BB == Load->getParent())) ?
BasicBlock::iterator(Load) : BB->end();
if (MDR->getPointerDependencyFrom(MemoryLocation(Ptr),
true, StartIt, BB, Load).isClobber())
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 65dba7d..0d6689b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -489,6 +489,19 @@ bool AMDGPUTTIImpl::isSourceOfDivergence(const Value *V) const {
return false;
}
+bool AMDGPUTTIImpl::isAlwaysUniform(const Value *V) const {
+ if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
+ switch (Intrinsic->getIntrinsicID()) {
+ default:
+ return false;
+ case Intrinsic::amdgcn_readfirstlane:
+ case Intrinsic::amdgcn_readlane:
+ return true;
+ }
+ }
+ return false;
+}
+
unsigned AMDGPUTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) {
if (ST->hasVOP3PInsts()) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
index 8466f11..a60b1bb 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
@@ -103,6 +103,7 @@ public:
int getVectorInstrCost(unsigned Opcode, Type *ValTy, unsigned Index);
bool isSourceOfDivergence(const Value *V) const;
+ bool isAlwaysUniform(const Value *V) const;
unsigned getFlatAddressSpace() const {
// Don't bother running InferAddressSpaces pass on graphics shaders which
diff --git a/llvm/test/CodeGen/AMDGPU/always-uniform.ll b/llvm/test/CodeGen/AMDGPU/always-uniform.ll
new file mode 100644
index 0000000..4ba57fb
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/always-uniform.ll
@@ -0,0 +1,21 @@
+; RUN: llc -mtriple amdgcn-amdhsa -mcpu=fiji -amdgpu-scalarize-global-loads -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+declare i32 @llvm.amdgcn.workitem.id.x()
+declare i32 @llvm.amdgcn.readfirstlane(i32)
+
+; GCN-LABEL: readfirstlane_uniform
+; GCN: s_load_dwordx2 s{{\[}}[[IN_ADDR:[0-9]+]]:1{{\]}}, s[4:5], 0x0
+; GCN: v_readfirstlane_b32 s[[SCALAR:[0-9]+]], v0
+; GCN: s_add_u32 s[[LOAD_ADDR:[0-9]+]], s[[IN_ADDR]], s[[SCALAR]]
+; GCN: s_load_dword s{{[0-9]+}}, s{{\[}}[[LOAD_ADDR]]
+
+define amdgpu_kernel void @readfirstlane_uniform(float addrspace(1)* noalias nocapture readonly, float addrspace(1)* noalias nocapture readonly) {
+ %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %scalar = tail call i32 @llvm.amdgcn.readfirstlane(i32 %tid)
+ %idx = zext i32 %scalar to i64
+ %gep0 = getelementptr inbounds float, float addrspace(1)* %0, i64 %idx
+ %val = load float, float addrspace(1)* %gep0, align 4
+ %gep1 = getelementptr inbounds float, float addrspace(1)* %1, i64 10
+ store float %val, float addrspace(1)* %gep1, align 4
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/global_smrd_cfg.ll b/llvm/test/CodeGen/AMDGPU/global_smrd_cfg.ll
index a6a0415..be6e3fd 100644
--- a/llvm/test/CodeGen/AMDGPU/global_smrd_cfg.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_smrd_cfg.ll
@@ -72,6 +72,39 @@ bb22: ; preds = %bb20, %bb11
br i1 %tmp31, label %bb7, label %bb11
}
+; One more test to ensure that an aliasing store that follows the load
+; is considered clobbering when the parent block of the load is
+; also a loop header block.
+
+; CHECK-LABEL: %bb1
+
+; The load from %arg is followed by an aliasing store, but that store
+; is still considered clobbering because of the loop back edge.
+
+; CHECK: flat_load_dword
+
+define amdgpu_kernel void @cfg_selfloop(i32 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) #0 {
+bb:
+ br label %bb1
+
+bb2:
+ ret void
+
+bb1:
+ %tmp13 = phi i32 [ %tmp25, %bb1 ], [ 0, %bb ]
+ %tmp14 = srem i32 %tmp13, %arg2
+ %tmp15 = sext i32 %tmp14 to i64
+ %tmp16 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp15
+ %tmp17 = load i32, i32 addrspace(1)* %tmp16, align 4, !tbaa !0
+ %tmp19 = sext i32 %tmp13 to i64
+ %tmp21 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i64 %tmp19
+ store i32 %tmp17, i32 addrspace(1)* %tmp21, align 4, !tbaa !0
+ %tmp25 = add nuw nsw i32 %tmp13, 1
+ %tmp31 = icmp eq i32 %tmp25, 100
+ br i1 %tmp31, label %bb2, label %bb1
+}
+
+
attributes #0 = { "target-cpu"="fiji" }
!0 = !{!1, !1, i64 0}