aboutsummaryrefslogtreecommitdiff
path: root/mlir/lib
diff options
context:
space:
mode:
authorRiver Riddle <riverriddle@google.com>2019-08-29 13:04:22 -0700
committerA. Unique TensorFlower <gardener@tensorflow.org>2019-08-29 13:04:50 -0700
commit4bfae66d70aea0df4bf9948e51f4bfa8895a4f4e (patch)
tree36d7709307e4319b8fe5fccb104ac7a9e6880603 /mlir/lib
parenta085700311cec4644381d71c6afeee021a7e8e25 (diff)
downloadllvm-4bfae66d70aea0df4bf9948e51f4bfa8895a4f4e.zip
llvm-4bfae66d70aea0df4bf9948e51f4bfa8895a4f4e.tar.gz
llvm-4bfae66d70aea0df4bf9948e51f4bfa8895a4f4e.tar.bz2
Refactor the 'walk' methods for operations.
This change refactors and cleans up the implementation of the operation walk methods. After this refactoring, the explicit template parameter for the operation type is no longer needed for the explicit op walks. For example: op->walk<AffineForOp>([](AffineForOp op) { ... }); is now accomplished via: op->walk([](AffineForOp op) { ... }); PiperOrigin-RevId: 266209552
Diffstat (limited to 'mlir/lib')
-rw-r--r--mlir/lib/Analysis/TestParallelismDetection.cpp2
-rw-r--r--mlir/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp2
-rw-r--r--mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp4
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp3
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/LowerToLLVMDialect.cpp2
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp4
-rw-r--r--mlir/lib/IR/Block.cpp16
-rw-r--r--mlir/lib/IR/Operation.cpp13
-rw-r--r--mlir/lib/IR/Region.cpp7
-rw-r--r--mlir/lib/IR/Visitors.cpp34
-rw-r--r--mlir/lib/Transforms/LoopCoalescing.cpp2
-rw-r--r--mlir/lib/Transforms/LoopInvariantCodeMotion.cpp2
-rw-r--r--mlir/lib/Transforms/LoopUnroll.cpp2
-rw-r--r--mlir/lib/Transforms/MemRefDataFlowOpt.cpp3
-rw-r--r--mlir/lib/Transforms/PipelineDataTransfer.cpp3
-rw-r--r--mlir/lib/Transforms/Utils/LoopFusionUtils.cpp2
-rw-r--r--mlir/lib/Transforms/Utils/LoopUtils.cpp3
-rw-r--r--mlir/lib/Transforms/Vectorize.cpp2
18 files changed, 50 insertions, 56 deletions
diff --git a/mlir/lib/Analysis/TestParallelismDetection.cpp b/mlir/lib/Analysis/TestParallelismDetection.cpp
index 351a6a7..75982a8 100644
--- a/mlir/lib/Analysis/TestParallelismDetection.cpp
+++ b/mlir/lib/Analysis/TestParallelismDetection.cpp
@@ -45,7 +45,7 @@ FunctionPassBase *mlir::createParallelismDetectionTestPass() {
void TestParallelismDetection::runOnFunction() {
FuncOp f = getFunction();
OpBuilder b(f.getBody());
- f.walk<AffineForOp>([&](AffineForOp forOp) {
+ f.walk([&](AffineForOp forOp) {
if (isLoopParallel(forOp))
forOp.emitRemark("parallel loop");
else
diff --git a/mlir/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp b/mlir/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp
index d4293ba..ba0bc47 100644
--- a/mlir/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp
+++ b/mlir/lib/Conversion/GPUToCUDA/ConvertLaunchFuncToCudaCalls.cpp
@@ -131,7 +131,7 @@ public:
initializeCachedTypes();
for (auto func : getModule().getOps<FuncOp>()) {
- func.walk<mlir::gpu::LaunchFuncOp>(
+ func.walk(
[this](mlir::gpu::LaunchFuncOp op) { translateGpuLaunchCalls(op); });
}
}
diff --git a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
index 481ed24..dae8ae8 100644
--- a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
@@ -71,7 +71,7 @@ static FuncOp outlineKernelFunc(gpu::LaunchOp launchOp) {
outlinedFunc.setAttr(gpu::GPUDialect::getKernelFuncAttrName(),
builder.getUnitAttr());
injectGpuIndexOperations(loc, outlinedFunc);
- outlinedFunc.walk<mlir::gpu::Return>([](mlir::gpu::Return op) {
+ outlinedFunc.walk([](mlir::gpu::Return op) {
OpBuilder replacer(op);
replacer.create<ReturnOp>(op.getLoc());
op.erase();
@@ -98,7 +98,7 @@ public:
void runOnModule() override {
ModuleManager moduleManager(getModule());
for (auto func : getModule().getOps<FuncOp>()) {
- func.walk<mlir::gpu::LaunchOp>([&](mlir::gpu::LaunchOp op) {
+ func.walk([&](mlir::gpu::LaunchOp op) {
FuncOp outlinedFunc = outlineKernelFunc(op);
moduleManager.insert(outlinedFunc);
convertToLaunchFuncOp(op, outlinedFunc);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
index 9f44b1c..d486064 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Fusion.cpp
@@ -239,8 +239,7 @@ static void fuseLinalgOps(FuncOp f, ArrayRef<int64_t> tileSizes) {
// 1. Record the linalg ops so we can traverse them in reverse order.
SmallVector<Operation *, 8> linalgOps;
- f.walk<LinalgOp>(
- [&](LinalgOp op) { linalgOps.push_back(op.getOperation()); });
+ f.walk([&](LinalgOp op) { linalgOps.push_back(op.getOperation()); });
// 2. Setup the dependences graph, aliases are populated lazily.
Aliases aliases;
diff --git a/mlir/lib/Dialect/Linalg/Transforms/LowerToLLVMDialect.cpp b/mlir/lib/Dialect/Linalg/Transforms/LowerToLLVMDialect.cpp
index 0bc355a..3c4123e 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/LowerToLLVMDialect.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/LowerToLLVMDialect.cpp
@@ -877,7 +877,7 @@ struct LowerLinalgToLLVMPass : public ModulePass<LowerLinalgToLLVMPass> {
// affine will look different than lowering to LLVM and it is still unclear how
// everything will be eventually structured.
static void lowerLinalgSubViewOps(FuncOp &f) {
- f.walk<SubViewOp>([&](SubViewOp op) {
+ f.walk([&](SubViewOp op) {
OpBuilder b(op);
ScopedContext scope(b, op.getLoc());
auto *view = op.getView();
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index d2495bd..11b3334f 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -490,7 +490,7 @@ mlir::linalg::tileLinalgOp(LinalgOp op, ArrayRef<int64_t> tileSizes,
static void tileLinalgOps(FuncOp f, ArrayRef<int64_t> tileSizes,
bool promoteViews) {
OperationFolder folder;
- f.walk<LinalgOp>([promoteViews, tileSizes, &folder](LinalgOp op) {
+ f.walk([promoteViews, tileSizes, &folder](LinalgOp op) {
// TODO(ntv) some heuristic here to decide what to promote. Atm it is all or
// nothing.
SmallVector<bool, 8> viewsToPromote(op.getNumInputsAndOutputs(),
@@ -500,7 +500,7 @@ static void tileLinalgOps(FuncOp f, ArrayRef<int64_t> tileSizes,
if (opLoopsPair)
op.erase();
});
- f.walk<LinalgOp>([](LinalgOp op) {
+ f.walk([](LinalgOp op) {
if (!op.getOperation()->hasNoSideEffect())
return;
if (op.getOperation()->use_empty())
diff --git a/mlir/lib/IR/Block.cpp b/mlir/lib/IR/Block.cpp
index 28614ca..a225c4d 100644
--- a/mlir/lib/IR/Block.cpp
+++ b/mlir/lib/IR/Block.cpp
@@ -225,22 +225,6 @@ Block *Block::getSinglePredecessor() {
}
//===----------------------------------------------------------------------===//
-// Operation Walkers
-//===----------------------------------------------------------------------===//
-
-void Block::walk(llvm::function_ref<void(Operation *)> callback) {
- walk(begin(), end(), callback);
-}
-
-/// Walk the operations in the specified [begin, end) range of this block,
-/// calling the callback for each operation.
-void Block::walk(Block::iterator begin, Block::iterator end,
- llvm::function_ref<void(Operation *)> callback) {
- for (auto &op : llvm::make_early_inc_range(llvm::make_range(begin, end)))
- op.walk(callback);
-}
-
-//===----------------------------------------------------------------------===//
// Other
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/IR/Operation.cpp b/mlir/lib/IR/Operation.cpp
index a623e39..56c7431 100644
--- a/mlir/lib/IR/Operation.cpp
+++ b/mlir/lib/IR/Operation.cpp
@@ -290,19 +290,6 @@ void Operation::replaceUsesOfWith(Value *from, Value *to) {
}
//===----------------------------------------------------------------------===//
-// Operation Walkers
-//===----------------------------------------------------------------------===//
-
-void Operation::walk(llvm::function_ref<void(Operation *)> callback) {
- // Visit any internal operations.
- for (auto &region : getRegions())
- region.walk(callback);
-
- // Visit the current operation.
- callback(this);
-}
-
-//===----------------------------------------------------------------------===//
// Other
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/IR/Region.cpp b/mlir/lib/IR/Region.cpp
index 0947ddd..df0cf07 100644
--- a/mlir/lib/IR/Region.cpp
+++ b/mlir/lib/IR/Region.cpp
@@ -168,13 +168,6 @@ bool Region::isIsolatedFromAbove(llvm::Optional<Location> noteLoc) {
return isIsolatedAbove(*this, *this, noteLoc);
}
-/// Walk the operations in this block in postorder, calling the callback for
-/// each operation.
-void Region::walk(llvm::function_ref<void(Operation *)> callback) {
- for (auto &block : *this)
- block.walk(callback);
-}
-
Region *llvm::ilist_traits<::mlir::Block>::getParentRegion() {
size_t Offset(
size_t(&((Region *)nullptr->*Region::getSublistAccess(nullptr))));
diff --git a/mlir/lib/IR/Visitors.cpp b/mlir/lib/IR/Visitors.cpp
new file mode 100644
index 0000000..4622098
--- /dev/null
+++ b/mlir/lib/IR/Visitors.cpp
@@ -0,0 +1,34 @@
+//===- Visitors.cpp - MLIR Visitor Utilities ------------------------------===//
+//
+// Copyright 2019 The MLIR Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// =============================================================================
+
+#include "mlir/IR/Visitors.h"
+#include "mlir/IR/Operation.h"
+
+using namespace mlir;
+
+/// Walk all of the operations nested under and including the given operation.
+void detail::walkOperations(Operation *op,
+ function_ref<void(Operation *op)> callback) {
+ // TODO(b/140235992) This walk should be iterative over the operations.
+ for (auto &region : op->getRegions())
+ for (auto &block : region)
+ // Early increment here in the case where the operation is erased.
+ for (auto &nestedOp : llvm::make_early_inc_range(block))
+ walkOperations(&nestedOp, callback);
+
+ callback(op);
+}
diff --git a/mlir/lib/Transforms/LoopCoalescing.cpp b/mlir/lib/Transforms/LoopCoalescing.cpp
index c4024fe..8e22060 100644
--- a/mlir/lib/Transforms/LoopCoalescing.cpp
+++ b/mlir/lib/Transforms/LoopCoalescing.cpp
@@ -34,7 +34,7 @@ public:
void runOnFunction() override {
FuncOp func = getFunction();
- func.walk<loop::ForOp>([](loop::ForOp op) {
+ func.walk([](loop::ForOp op) {
// Ignore nested loops.
if (op.getParentOfType<loop::ForOp>())
return;
diff --git a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
index 293e565..be39297 100644
--- a/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
+++ b/mlir/lib/Transforms/LoopInvariantCodeMotion.cpp
@@ -240,7 +240,7 @@ void LoopInvariantCodeMotion::runOnFunction() {
// Walk through all loops in a function in innermost-loop-first order. This
// way, we first LICM from the inner loop, and place the ops in
// the outer loop, which in turn can be further LICM'ed.
- getFunction().walk<AffineForOp>([&](AffineForOp op) {
+ getFunction().walk([&](AffineForOp op) {
LLVM_DEBUG(op.getOperation()->print(llvm::dbgs() << "\nOriginal loop\n"));
runOnAffineForOp(op);
});
diff --git a/mlir/lib/Transforms/LoopUnroll.cpp b/mlir/lib/Transforms/LoopUnroll.cpp
index 2acc5a9..5e13279 100644
--- a/mlir/lib/Transforms/LoopUnroll.cpp
+++ b/mlir/lib/Transforms/LoopUnroll.cpp
@@ -128,7 +128,7 @@ void LoopUnroll::runOnFunction() {
// Gathers all loops with trip count <= minTripCount. Do a post order walk
// so that loops are gathered from innermost to outermost (or else unrolling
// an outer one may delete gathered inner ones).
- getFunction().walk<AffineForOp>([&](AffineForOp forOp) {
+ getFunction().walk([&](AffineForOp forOp) {
Optional<uint64_t> tripCount = getConstantTripCount(forOp);
if (tripCount.hasValue() && tripCount.getValue() <= clUnrollFullThreshold)
loops.push_back(forOp);
diff --git a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
index 9b71ada..f922d50 100644
--- a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
+++ b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
@@ -226,8 +226,7 @@ void MemRefDataFlowOpt::runOnFunction() {
memrefsToErase.clear();
// Walk all load's and perform load/store forwarding.
- f.walk<AffineLoadOp>(
- [&](AffineLoadOp loadOp) { forwardStoreToLoad(loadOp); });
+ f.walk([&](AffineLoadOp loadOp) { forwardStoreToLoad(loadOp); });
// Erase all load op's whose results were replaced with store fwd'ed ones.
for (auto *loadOp : loadOpsToErase) {
diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp
index a814af9..74b06aa 100644
--- a/mlir/lib/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp
@@ -144,8 +144,7 @@ void PipelineDataTransfer::runOnFunction() {
// gets deleted and replaced by a prologue, a new steady-state loop and an
// epilogue).
forOps.clear();
- getFunction().walk<AffineForOp>(
- [&](AffineForOp forOp) { forOps.push_back(forOp); });
+ getFunction().walk([&](AffineForOp forOp) { forOps.push_back(forOp); });
for (auto forOp : forOps)
runOnAffineForOp(forOp);
}
diff --git a/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp b/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
index 8b31478..99f315e 100644
--- a/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopFusionUtils.cpp
@@ -258,7 +258,7 @@ FusionResult mlir::canFuseLoops(AffineForOp srcForOp, AffineForOp dstForOp,
/// returns false otherwise.
bool mlir::getLoopNestStats(AffineForOp forOpRoot, LoopNestStats *stats) {
bool ret = true;
- forOpRoot.getOperation()->walk<AffineForOp>([&](AffineForOp forOp) {
+ forOpRoot.walk([&](AffineForOp forOp) {
auto *childForOp = forOp.getOperation();
auto *parentForOp = forOp.getOperation()->getParentOp();
if (!llvm::isa<FuncOp>(parentForOp)) {
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index d6a31f9..91f72f3 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -165,8 +165,7 @@ LogicalResult mlir::promoteIfSingleIteration(AffineForOp forOp) {
/// their body into the containing Block.
void mlir::promoteSingleIterationLoops(FuncOp f) {
// Gathers all innermost loops through a post order pruned walk.
- f.walk<AffineForOp>(
- [](AffineForOp forOp) { promoteIfSingleIteration(forOp); });
+ f.walk([](AffineForOp forOp) { promoteIfSingleIteration(forOp); });
}
/// Generates a 'affine.for' op with the specified lower and upper bounds
diff --git a/mlir/lib/Transforms/Vectorize.cpp b/mlir/lib/Transforms/Vectorize.cpp
index cbf616e..89e3da7 100644
--- a/mlir/lib/Transforms/Vectorize.cpp
+++ b/mlir/lib/Transforms/Vectorize.cpp
@@ -1240,7 +1240,7 @@ void Vectorize::runOnFunction() {
NestedPatternContext mlContext;
llvm::DenseSet<Operation *> parallelLoops;
- f.walk<AffineForOp>([&parallelLoops](AffineForOp loop) {
+ f.walk([&parallelLoops](AffineForOp loop) {
if (isLoopParallel(loop))
parallelLoops.insert(loop);
});