aboutsummaryrefslogtreecommitdiff
path: root/mlir/lib/Rewrite/ByteCode.cpp
diff options
context:
space:
mode:
authorStanislav Funiak <stano@cerebras.net>2022-01-04 08:03:29 +0530
committerUday Bondhugula <uday@polymagelabs.com>2022-01-04 08:03:45 +0530
commitb4130e9eadfe46b4d3380c40ce8c3e900a0fd21b (patch)
treef8358248b443205dc06c6eaf5ec904071233ce4c /mlir/lib/Rewrite/ByteCode.cpp
parent138803e017739c81b43b73631c7096bfc4d097d8 (diff)
downloadllvm-b4130e9eadfe46b4d3380c40ce8c3e900a0fd21b.zip
llvm-b4130e9eadfe46b4d3380c40ce8c3e900a0fd21b.tar.gz
llvm-b4130e9eadfe46b4d3380c40ce8c3e900a0fd21b.tar.bz2
[MLIR][PDL] Integration test of multi-root matching and related fixes.
This diff adds an integration test for multi-root PDL matching. It consists of two subtests:

1) A 1-layer perceptron with split forward / backward operations.
2) A 2-layer perceptron with fused forward / backward operations.

These tests use a collection of hand-written patterns and TensorFlow operations to be matched. The first test has a DAG / SSA-dominant resulting match; the second does not and is therefore stored in a graph region.

This diff also includes two bug fixes:

1) Mark the pdl_interp dialect as a dependent in the TestPDLByteCodePass. This is needed because we create ops from that dialect as part of the PDL-to-PDLInterp lowering.
2) Fix the starting index in the liveness range for the ForEach operations (bug exposed by the integration test).

Reviewed By: Mogball

Differential Revision: https://reviews.llvm.org/D116082
Diffstat (limited to 'mlir/lib/Rewrite/ByteCode.cpp')
-rw-r--r--  mlir/lib/Rewrite/ByteCode.cpp  24
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/mlir/lib/Rewrite/ByteCode.cpp b/mlir/lib/Rewrite/ByteCode.cpp
index 765c47b2..d6a07f9 100644
--- a/mlir/lib/Rewrite/ByteCode.cpp
+++ b/mlir/lib/Rewrite/ByteCode.cpp
@@ -551,10 +551,22 @@ void Generator::allocateMemoryIndices(FuncOp matcherFunc,
// finding the minimal number of overlapping live ranges. This is essentially
// a simplified form of register allocation where we don't necessarily have a
// limited number of registers, but we still want to minimize the number used.
- DenseMap<Operation *, unsigned> opToIndex;
- matcherFunc.getBody().walk([&](Operation *op) {
- opToIndex.insert(std::make_pair(op, opToIndex.size()));
- });
+ DenseMap<Operation *, unsigned> opToFirstIndex;
+ DenseMap<Operation *, unsigned> opToLastIndex;
+
+ // A custom walk that marks the first and the last index of each operation.
+ // The entry marks the beginning of the liveness range for this operation,
+ // followed by nested operations, followed by the end of the liveness range.
+ unsigned index = 0;
+ llvm::unique_function<void(Operation *)> walk = [&](Operation *op) {
+ opToFirstIndex.try_emplace(op, index++);
+ for (Region &region : op->getRegions())
+ for (Block &block : region.getBlocks())
+ for (Operation &nested : block)
+ walk(&nested);
+ opToLastIndex.try_emplace(op, index++);
+ };
+ walk(matcherFunc);
// Liveness info for each of the defs within the matcher.
ByteCodeLiveRange::Allocator allocator;
@@ -578,8 +590,8 @@ void Generator::allocateMemoryIndices(FuncOp matcherFunc,
// Set indices for the range of this block that the value is used.
auto defRangeIt = valueDefRanges.try_emplace(value, allocator).first;
defRangeIt->second.liveness->insert(
- opToIndex[firstUseOrDef],
- opToIndex[info->getEndOperation(value, firstUseOrDef)],
+ opToFirstIndex[firstUseOrDef],
+ opToLastIndex[info->getEndOperation(value, firstUseOrDef)],
/*dummyValue*/ 0);
// Check to see if this value is a range type.