Diffstat (limited to 'mlir/examples')
-rw-r--r--  mlir/examples/standalone/standalone-opt/CMakeLists.txt      |  14
-rw-r--r--  mlir/examples/standalone/standalone-opt/standalone-opt.cpp  |   2
-rw-r--r--  mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp           |   4
-rw-r--r--  mlir/examples/toy/Ch5/CMakeLists.txt                        |   9
-rw-r--r--  mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp           | 126
-rw-r--r--  mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp           |   4
-rw-r--r--  mlir/examples/toy/Ch5/toyc.cpp                              |   1
-rw-r--r--  mlir/examples/toy/Ch6/CMakeLists.txt                        |  11
-rw-r--r--  mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp           | 126
-rw-r--r--  mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp                  |  12
-rw-r--r--  mlir/examples/toy/Ch6/mlir/ShapeInferencePass.cpp           |   4
-rw-r--r--  mlir/examples/toy/Ch6/toyc.cpp                              |   1
-rw-r--r--  mlir/examples/toy/Ch7/CMakeLists.txt                        |  11
-rw-r--r--  mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp           | 122
-rw-r--r--  mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp                  |  12
-rw-r--r--  mlir/examples/toy/Ch7/mlir/ShapeInferencePass.cpp           |   4
-rw-r--r--  mlir/examples/toy/Ch7/toyc.cpp                              |   1
-rw-r--r--  mlir/examples/transform-opt/CMakeLists.txt                  |  10
-rw-r--r--  mlir/examples/transform-opt/mlir-transform-opt.cpp          |   1
-rw-r--r--  mlir/examples/transform/Ch4/lib/MyExtension.cpp             |  11
20 files changed, 212 insertions(+), 274 deletions(-)
diff --git a/mlir/examples/standalone/standalone-opt/CMakeLists.txt b/mlir/examples/standalone/standalone-opt/CMakeLists.txt
index 27f8128..4b38de7 100644
--- a/mlir/examples/standalone/standalone-opt/CMakeLists.txt
+++ b/mlir/examples/standalone/standalone-opt/CMakeLists.txt
@@ -1,12 +1,10 @@
-get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS)
-get_property(conversion_libs GLOBAL PROPERTY MLIR_CONVERSION_LIBS)
set(LIBS
- ${dialect_libs}
- ${conversion_libs}
- MLIRArithDialect
- MLIROptLib
- MLIRStandalone
- )
+ MLIRArithDialect
+ MLIROptLib
+ MLIRRegisterAllDialects
+ MLIRRegisterAllPasses
+ MLIRStandalone
+ )
add_llvm_executable(standalone-opt standalone-opt.cpp)
llvm_update_compile_flags(standalone-opt)
diff --git a/mlir/examples/standalone/standalone-opt/standalone-opt.cpp b/mlir/examples/standalone/standalone-opt/standalone-opt.cpp
index e39fa96..eebfcb7 100644
--- a/mlir/examples/standalone/standalone-opt/standalone-opt.cpp
+++ b/mlir/examples/standalone/standalone-opt/standalone-opt.cpp
@@ -6,6 +6,8 @@
//
//===----------------------------------------------------------------------===//
+#include "mlir/Dialect/Arith/IR/Arith.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/InitAllDialects.h"
#include "mlir/InitAllPasses.h"
diff --git a/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp b/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp
index 2522abe..a552e1f0 100644
--- a/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp
+++ b/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp
@@ -23,7 +23,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/Casting.h"
-#include "llvm/Support/Debug.h"
+#include "llvm/Support/DebugLog.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
@@ -81,7 +81,7 @@ struct ShapeInferencePass
opWorklist.erase(op);
// Ask the operation to infer its output shapes.
- LLVM_DEBUG(llvm::dbgs() << "Inferring shape for: " << *op << "\n");
+ LDBG() << "Inferring shape for: " << *op;
if (auto shapeOp = dyn_cast<ShapeInference>(op)) {
shapeOp.inferShapes();
} else {
diff --git a/mlir/examples/toy/Ch5/CMakeLists.txt b/mlir/examples/toy/Ch5/CMakeLists.txt
index f4f0fec..454ca56 100644
--- a/mlir/examples/toy/Ch5/CMakeLists.txt
+++ b/mlir/examples/toy/Ch5/CMakeLists.txt
@@ -27,12 +27,8 @@ add_toy_chapter(toyc-ch5
include_directories(${CMAKE_CURRENT_BINARY_DIR})
include_directories(${CMAKE_CURRENT_BINARY_DIR}/include/)
-get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS)
-get_property(extension_libs GLOBAL PROPERTY MLIR_EXTENSION_LIBS)
target_link_libraries(toyc-ch5
PRIVATE
- ${dialect_libs}
- ${extension_libs}
MLIRAnalysis
MLIRCallInterfaces
MLIRCastInterfaces
@@ -40,6 +36,9 @@ target_link_libraries(toyc-ch5
MLIRIR
MLIRParser
MLIRPass
+ MLIRRegisterAllDialects
+ MLIRRegisterAllExtensions
MLIRSideEffectInterfaces
MLIRSupport
- MLIRTransforms)
+ MLIRTransforms
+ )
diff --git a/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
index d65c89c..2969d3a 100644
--- a/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch5/mlir/LowerToAffineLoops.cpp
@@ -44,7 +44,7 @@
using namespace mlir;
//===----------------------------------------------------------------------===//
-// ToyToAffine RewritePatterns
+// ToyToAffine Conversion Patterns
//===----------------------------------------------------------------------===//
/// Convert the given RankedTensorType into the corresponding MemRefType.
@@ -69,15 +69,13 @@ static Value insertAllocAndDealloc(MemRefType type, Location loc,
}
/// This defines the function type used to process an iteration of a lowered
-/// loop. It takes as input an OpBuilder, an range of memRefOperands
-/// corresponding to the operands of the input operation, and the range of loop
-/// induction variables for the iteration. It returns a value to store at the
-/// current index of the iteration.
-using LoopIterationFn = function_ref<Value(
- OpBuilder &rewriter, ValueRange memRefOperands, ValueRange loopIvs)>;
-
-static void lowerOpToLoops(Operation *op, ValueRange operands,
- PatternRewriter &rewriter,
+/// loop. It takes as input an OpBuilder and the range of loop induction
+/// variables for the iteration. It returns a value to store at the current
+/// index of the iteration.
+using LoopIterationFn =
+ function_ref<Value(OpBuilder &rewriter, ValueRange loopIvs)>;
+
+static void lowerOpToLoops(Operation *op, PatternRewriter &rewriter,
LoopIterationFn processIteration) {
auto tensorType = llvm::cast<RankedTensorType>((*op->result_type_begin()));
auto loc = op->getLoc();
@@ -95,10 +93,10 @@ static void lowerOpToLoops(Operation *op, ValueRange operands,
affine::buildAffineLoopNest(
rewriter, loc, lowerBounds, tensorType.getShape(), steps,
[&](OpBuilder &nestedBuilder, Location loc, ValueRange ivs) {
- // Call the processing function with the rewriter, the memref operands,
- // and the loop induction variables. This function will return the value
- // to store at the current index.
- Value valueToStore = processIteration(nestedBuilder, operands, ivs);
+ // Call the processing function with the rewriter and the loop
+ // induction variables. This function will return the value to store at
+ // the current index.
+ Value valueToStore = processIteration(nestedBuilder, ivs);
affine::AffineStoreOp::create(nestedBuilder, loc, valueToStore, alloc,
ivs);
});
@@ -109,38 +107,30 @@ static void lowerOpToLoops(Operation *op, ValueRange operands,
namespace {
//===----------------------------------------------------------------------===//
-// ToyToAffine RewritePatterns: Binary operations
+// ToyToAffine Conversion Patterns: Binary operations
//===----------------------------------------------------------------------===//
template <typename BinaryOp, typename LoweredBinaryOp>
-struct BinaryOpLowering : public ConversionPattern {
- BinaryOpLowering(MLIRContext *ctx)
- : ConversionPattern(BinaryOp::getOperationName(), 1, ctx) {}
+struct BinaryOpLowering : public OpConversionPattern<BinaryOp> {
+ using OpConversionPattern<BinaryOp>::OpConversionPattern;
+ using OpAdaptor = typename OpConversionPattern<BinaryOp>::OpAdaptor;
LogicalResult
- matchAndRewrite(Operation *op, ArrayRef<Value> operands,
+ matchAndRewrite(BinaryOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const final {
auto loc = op->getLoc();
- lowerOpToLoops(op, operands, rewriter,
- [loc](OpBuilder &builder, ValueRange memRefOperands,
- ValueRange loopIvs) {
- // Generate an adaptor for the remapped operands of the
- // BinaryOp. This allows for using the nice named accessors
- // that are generated by the ODS.
- typename BinaryOp::Adaptor binaryAdaptor(memRefOperands);
-
- // Generate loads for the element of 'lhs' and 'rhs' at the
- // inner loop.
- auto loadedLhs = affine::AffineLoadOp::create(
- builder, loc, binaryAdaptor.getLhs(), loopIvs);
- auto loadedRhs = affine::AffineLoadOp::create(
- builder, loc, binaryAdaptor.getRhs(), loopIvs);
-
- // Create the binary operation performed on the loaded
- // values.
- return LoweredBinaryOp::create(builder, loc, loadedLhs,
- loadedRhs);
- });
+ lowerOpToLoops(op, rewriter, [&](OpBuilder &builder, ValueRange loopIvs) {
+ // Generate loads for the element of 'lhs' and 'rhs' at the
+ // inner loop.
+ auto loadedLhs =
+ affine::AffineLoadOp::create(builder, loc, adaptor.getLhs(), loopIvs);
+ auto loadedRhs =
+ affine::AffineLoadOp::create(builder, loc, adaptor.getRhs(), loopIvs);
+
+ // Create the binary operation performed on the loaded
+ // values.
+ return LoweredBinaryOp::create(builder, loc, loadedLhs, loadedRhs);
+ });
return success();
}
};
@@ -148,14 +138,15 @@ using AddOpLowering = BinaryOpLowering<toy::AddOp, arith::AddFOp>;
using MulOpLowering = BinaryOpLowering<toy::MulOp, arith::MulFOp>;
//===----------------------------------------------------------------------===//
-// ToyToAffine RewritePatterns: Constant operations
+// ToyToAffine Conversion Patterns: Constant operations
//===----------------------------------------------------------------------===//
-struct ConstantOpLowering : public OpRewritePattern<toy::ConstantOp> {
- using OpRewritePattern<toy::ConstantOp>::OpRewritePattern;
+struct ConstantOpLowering : public OpConversionPattern<toy::ConstantOp> {
+ using OpConversionPattern<toy::ConstantOp>::OpConversionPattern;
- LogicalResult matchAndRewrite(toy::ConstantOp op,
- PatternRewriter &rewriter) const final {
+ LogicalResult
+ matchAndRewrite(toy::ConstantOp op, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const final {
DenseElementsAttr constantValue = op.getValue();
Location loc = op.getLoc();
@@ -216,7 +207,7 @@ struct ConstantOpLowering : public OpRewritePattern<toy::ConstantOp> {
};
//===----------------------------------------------------------------------===//
-// ToyToAffine RewritePatterns: Func operations
+// ToyToAffine Conversion Patterns: Func operations
//===----------------------------------------------------------------------===//
struct FuncOpLowering : public OpConversionPattern<toy::FuncOp> {
@@ -247,7 +238,7 @@ struct FuncOpLowering : public OpConversionPattern<toy::FuncOp> {
};
//===----------------------------------------------------------------------===//
-// ToyToAffine RewritePatterns: Print operations
+// ToyToAffine Conversion Patterns: Print operations
//===----------------------------------------------------------------------===//
struct PrintOpLowering : public OpConversionPattern<toy::PrintOp> {
@@ -265,14 +256,15 @@ struct PrintOpLowering : public OpConversionPattern<toy::PrintOp> {
};
//===----------------------------------------------------------------------===//
-// ToyToAffine RewritePatterns: Return operations
+// ToyToAffine Conversion Patterns: Return operations
//===----------------------------------------------------------------------===//
-struct ReturnOpLowering : public OpRewritePattern<toy::ReturnOp> {
- using OpRewritePattern<toy::ReturnOp>::OpRewritePattern;
+struct ReturnOpLowering : public OpConversionPattern<toy::ReturnOp> {
+ using OpConversionPattern<toy::ReturnOp>::OpConversionPattern;
- LogicalResult matchAndRewrite(toy::ReturnOp op,
- PatternRewriter &rewriter) const final {
+ LogicalResult
+ matchAndRewrite(toy::ReturnOp op, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const final {
// During this lowering, we expect that all function calls have been
// inlined.
if (op.hasOperand())
@@ -285,32 +277,24 @@ struct ReturnOpLowering : public OpRewritePattern<toy::ReturnOp> {
};
//===----------------------------------------------------------------------===//
-// ToyToAffine RewritePatterns: Transpose operations
+// ToyToAffine Conversion Patterns: Transpose operations
//===----------------------------------------------------------------------===//
-struct TransposeOpLowering : public ConversionPattern {
- TransposeOpLowering(MLIRContext *ctx)
- : ConversionPattern(toy::TransposeOp::getOperationName(), 1, ctx) {}
+struct TransposeOpLowering : public OpConversionPattern<toy::TransposeOp> {
+ using OpConversionPattern<toy::TransposeOp>::OpConversionPattern;
LogicalResult
- matchAndRewrite(Operation *op, ArrayRef<Value> operands,
+ matchAndRewrite(toy::TransposeOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const final {
auto loc = op->getLoc();
- lowerOpToLoops(op, operands, rewriter,
- [loc](OpBuilder &builder, ValueRange memRefOperands,
- ValueRange loopIvs) {
- // Generate an adaptor for the remapped operands of the
- // TransposeOp. This allows for using the nice named
- // accessors that are generated by the ODS.
- toy::TransposeOpAdaptor transposeAdaptor(memRefOperands);
- Value input = transposeAdaptor.getInput();
-
- // Transpose the elements by generating a load from the
- // reverse indices.
- SmallVector<Value, 2> reverseIvs(llvm::reverse(loopIvs));
- return affine::AffineLoadOp::create(builder, loc, input,
- reverseIvs);
- });
+ lowerOpToLoops(op, rewriter, [&](OpBuilder &builder, ValueRange loopIvs) {
+ Value input = adaptor.getInput();
+
+ // Transpose the elements by generating a load from the
+ // reverse indices.
+ SmallVector<Value, 2> reverseIvs(llvm::reverse(loopIvs));
+ return affine::AffineLoadOp::create(builder, loc, input, reverseIvs);
+ });
return success();
}
};
diff --git a/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp b/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp
index 2522abe..a552e1f0 100644
--- a/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp
+++ b/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp
@@ -23,7 +23,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/Casting.h"
-#include "llvm/Support/Debug.h"
+#include "llvm/Support/DebugLog.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
@@ -81,7 +81,7 @@ struct ShapeInferencePass
opWorklist.erase(op);
// Ask the operation to infer its output shapes.
- LLVM_DEBUG(llvm::dbgs() << "Inferring shape for: " << *op << "\n");
+ LDBG() << "Inferring shape for: " << *op;
if (auto shapeOp = dyn_cast<ShapeInference>(op)) {
shapeOp.inferShapes();
} else {
diff --git a/mlir/examples/toy/Ch5/toyc.cpp b/mlir/examples/toy/Ch5/toyc.cpp
index 6a0c631..afdf782 100644
--- a/mlir/examples/toy/Ch5/toyc.cpp
+++ b/mlir/examples/toy/Ch5/toyc.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "mlir/Dialect/Func/Extensions/AllExtensions.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Diagnostics.h"
#include "toy/AST.h"
#include "toy/Dialect.h"
diff --git a/mlir/examples/toy/Ch6/CMakeLists.txt b/mlir/examples/toy/Ch6/CMakeLists.txt
index 283b895..73df602 100644
--- a/mlir/examples/toy/Ch6/CMakeLists.txt
+++ b/mlir/examples/toy/Ch6/CMakeLists.txt
@@ -37,14 +37,8 @@ add_toy_chapter(toyc-ch6
include_directories(${CMAKE_CURRENT_BINARY_DIR})
include_directories(${CMAKE_CURRENT_BINARY_DIR}/include/)
-get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS)
-get_property(conversion_libs GLOBAL PROPERTY MLIR_CONVERSION_LIBS)
-get_property(extension_libs GLOBAL PROPERTY MLIR_EXTENSION_LIBS)
target_link_libraries(toyc-ch6
PRIVATE
- ${dialect_libs}
- ${conversion_libs}
- ${extension_libs}
MLIRAnalysis
MLIRBuiltinToLLVMIRTranslation
MLIRCallInterfaces
@@ -58,8 +52,11 @@ target_link_libraries(toyc-ch6
MLIRMemRefDialect
MLIRParser
MLIRPass
+ MLIRRegisterAllDialects
+ MLIRRegisterAllExtensions
+ MLIRRegisterAllPasses
MLIRSideEffectInterfaces
MLIRSupport
MLIRTargetLLVMIRExport
MLIRTransforms
- )
+ )
diff --git a/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
index d65c89c..2969d3a 100644
--- a/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch6/mlir/LowerToAffineLoops.cpp
@@ -44,7 +44,7 @@
using namespace mlir;
//===----------------------------------------------------------------------===//
-// ToyToAffine RewritePatterns
+// ToyToAffine Conversion Patterns
//===----------------------------------------------------------------------===//
/// Convert the given RankedTensorType into the corresponding MemRefType.
@@ -69,15 +69,13 @@ static Value insertAllocAndDealloc(MemRefType type, Location loc,
}
/// This defines the function type used to process an iteration of a lowered
-/// loop. It takes as input an OpBuilder, an range of memRefOperands
-/// corresponding to the operands of the input operation, and the range of loop
-/// induction variables for the iteration. It returns a value to store at the
-/// current index of the iteration.
-using LoopIterationFn = function_ref<Value(
- OpBuilder &rewriter, ValueRange memRefOperands, ValueRange loopIvs)>;
-
-static void lowerOpToLoops(Operation *op, ValueRange operands,
- PatternRewriter &rewriter,
+/// loop. It takes as input an OpBuilder and the range of loop induction
+/// variables for the iteration. It returns a value to store at the current
+/// index of the iteration.
+using LoopIterationFn =
+ function_ref<Value(OpBuilder &rewriter, ValueRange loopIvs)>;
+
+static void lowerOpToLoops(Operation *op, PatternRewriter &rewriter,
LoopIterationFn processIteration) {
auto tensorType = llvm::cast<RankedTensorType>((*op->result_type_begin()));
auto loc = op->getLoc();
@@ -95,10 +93,10 @@ static void lowerOpToLoops(Operation *op, ValueRange operands,
affine::buildAffineLoopNest(
rewriter, loc, lowerBounds, tensorType.getShape(), steps,
[&](OpBuilder &nestedBuilder, Location loc, ValueRange ivs) {
- // Call the processing function with the rewriter, the memref operands,
- // and the loop induction variables. This function will return the value
- // to store at the current index.
- Value valueToStore = processIteration(nestedBuilder, operands, ivs);
+ // Call the processing function with the rewriter and the loop
+ // induction variables. This function will return the value to store at
+ // the current index.
+ Value valueToStore = processIteration(nestedBuilder, ivs);
affine::AffineStoreOp::create(nestedBuilder, loc, valueToStore, alloc,
ivs);
});
@@ -109,38 +107,30 @@ static void lowerOpToLoops(Operation *op, ValueRange operands,
namespace {
//===----------------------------------------------------------------------===//
-// ToyToAffine RewritePatterns: Binary operations
+// ToyToAffine Conversion Patterns: Binary operations
//===----------------------------------------------------------------------===//
template <typename BinaryOp, typename LoweredBinaryOp>
-struct BinaryOpLowering : public ConversionPattern {
- BinaryOpLowering(MLIRContext *ctx)
- : ConversionPattern(BinaryOp::getOperationName(), 1, ctx) {}
+struct BinaryOpLowering : public OpConversionPattern<BinaryOp> {
+ using OpConversionPattern<BinaryOp>::OpConversionPattern;
+ using OpAdaptor = typename OpConversionPattern<BinaryOp>::OpAdaptor;
LogicalResult
- matchAndRewrite(Operation *op, ArrayRef<Value> operands,
+ matchAndRewrite(BinaryOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const final {
auto loc = op->getLoc();
- lowerOpToLoops(op, operands, rewriter,
- [loc](OpBuilder &builder, ValueRange memRefOperands,
- ValueRange loopIvs) {
- // Generate an adaptor for the remapped operands of the
- // BinaryOp. This allows for using the nice named accessors
- // that are generated by the ODS.
- typename BinaryOp::Adaptor binaryAdaptor(memRefOperands);
-
- // Generate loads for the element of 'lhs' and 'rhs' at the
- // inner loop.
- auto loadedLhs = affine::AffineLoadOp::create(
- builder, loc, binaryAdaptor.getLhs(), loopIvs);
- auto loadedRhs = affine::AffineLoadOp::create(
- builder, loc, binaryAdaptor.getRhs(), loopIvs);
-
- // Create the binary operation performed on the loaded
- // values.
- return LoweredBinaryOp::create(builder, loc, loadedLhs,
- loadedRhs);
- });
+ lowerOpToLoops(op, rewriter, [&](OpBuilder &builder, ValueRange loopIvs) {
+ // Generate loads for the element of 'lhs' and 'rhs' at the
+ // inner loop.
+ auto loadedLhs =
+ affine::AffineLoadOp::create(builder, loc, adaptor.getLhs(), loopIvs);
+ auto loadedRhs =
+ affine::AffineLoadOp::create(builder, loc, adaptor.getRhs(), loopIvs);
+
+ // Create the binary operation performed on the loaded
+ // values.
+ return LoweredBinaryOp::create(builder, loc, loadedLhs, loadedRhs);
+ });
return success();
}
};
@@ -148,14 +138,15 @@ using AddOpLowering = BinaryOpLowering<toy::AddOp, arith::AddFOp>;
using MulOpLowering = BinaryOpLowering<toy::MulOp, arith::MulFOp>;
//===----------------------------------------------------------------------===//
-// ToyToAffine RewritePatterns: Constant operations
+// ToyToAffine Conversion Patterns: Constant operations
//===----------------------------------------------------------------------===//
-struct ConstantOpLowering : public OpRewritePattern<toy::ConstantOp> {
- using OpRewritePattern<toy::ConstantOp>::OpRewritePattern;
+struct ConstantOpLowering : public OpConversionPattern<toy::ConstantOp> {
+ using OpConversionPattern<toy::ConstantOp>::OpConversionPattern;
- LogicalResult matchAndRewrite(toy::ConstantOp op,
- PatternRewriter &rewriter) const final {
+ LogicalResult
+ matchAndRewrite(toy::ConstantOp op, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const final {
DenseElementsAttr constantValue = op.getValue();
Location loc = op.getLoc();
@@ -216,7 +207,7 @@ struct ConstantOpLowering : public OpRewritePattern<toy::ConstantOp> {
};
//===----------------------------------------------------------------------===//
-// ToyToAffine RewritePatterns: Func operations
+// ToyToAffine Conversion Patterns: Func operations
//===----------------------------------------------------------------------===//
struct FuncOpLowering : public OpConversionPattern<toy::FuncOp> {
@@ -247,7 +238,7 @@ struct FuncOpLowering : public OpConversionPattern<toy::FuncOp> {
};
//===----------------------------------------------------------------------===//
-// ToyToAffine RewritePatterns: Print operations
+// ToyToAffine Conversion Patterns: Print operations
//===----------------------------------------------------------------------===//
struct PrintOpLowering : public OpConversionPattern<toy::PrintOp> {
@@ -265,14 +256,15 @@ struct PrintOpLowering : public OpConversionPattern<toy::PrintOp> {
};
//===----------------------------------------------------------------------===//
-// ToyToAffine RewritePatterns: Return operations
+// ToyToAffine Conversion Patterns: Return operations
//===----------------------------------------------------------------------===//
-struct ReturnOpLowering : public OpRewritePattern<toy::ReturnOp> {
- using OpRewritePattern<toy::ReturnOp>::OpRewritePattern;
+struct ReturnOpLowering : public OpConversionPattern<toy::ReturnOp> {
+ using OpConversionPattern<toy::ReturnOp>::OpConversionPattern;
- LogicalResult matchAndRewrite(toy::ReturnOp op,
- PatternRewriter &rewriter) const final {
+ LogicalResult
+ matchAndRewrite(toy::ReturnOp op, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const final {
// During this lowering, we expect that all function calls have been
// inlined.
if (op.hasOperand())
@@ -285,32 +277,24 @@ struct ReturnOpLowering : public OpRewritePattern<toy::ReturnOp> {
};
//===----------------------------------------------------------------------===//
-// ToyToAffine RewritePatterns: Transpose operations
+// ToyToAffine Conversion Patterns: Transpose operations
//===----------------------------------------------------------------------===//
-struct TransposeOpLowering : public ConversionPattern {
- TransposeOpLowering(MLIRContext *ctx)
- : ConversionPattern(toy::TransposeOp::getOperationName(), 1, ctx) {}
+struct TransposeOpLowering : public OpConversionPattern<toy::TransposeOp> {
+ using OpConversionPattern<toy::TransposeOp>::OpConversionPattern;
LogicalResult
- matchAndRewrite(Operation *op, ArrayRef<Value> operands,
+ matchAndRewrite(toy::TransposeOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const final {
auto loc = op->getLoc();
- lowerOpToLoops(op, operands, rewriter,
- [loc](OpBuilder &builder, ValueRange memRefOperands,
- ValueRange loopIvs) {
- // Generate an adaptor for the remapped operands of the
- // TransposeOp. This allows for using the nice named
- // accessors that are generated by the ODS.
- toy::TransposeOpAdaptor transposeAdaptor(memRefOperands);
- Value input = transposeAdaptor.getInput();
-
- // Transpose the elements by generating a load from the
- // reverse indices.
- SmallVector<Value, 2> reverseIvs(llvm::reverse(loopIvs));
- return affine::AffineLoadOp::create(builder, loc, input,
- reverseIvs);
- });
+ lowerOpToLoops(op, rewriter, [&](OpBuilder &builder, ValueRange loopIvs) {
+ Value input = adaptor.getInput();
+
+ // Transpose the elements by generating a load from the
+ // reverse indices.
+ SmallVector<Value, 2> reverseIvs(llvm::reverse(loopIvs));
+ return affine::AffineLoadOp::create(builder, loc, input, reverseIvs);
+ });
return success();
}
};
diff --git a/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp b/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp
index e0950ef..987dfa1 100644
--- a/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp
+++ b/mlir/examples/toy/Ch6/mlir/LowerToLLVM.cpp
@@ -55,19 +55,18 @@
using namespace mlir;
//===----------------------------------------------------------------------===//
-// ToyToLLVM RewritePatterns
+// ToyToLLVM Conversion Patterns
//===----------------------------------------------------------------------===//
namespace {
/// Lowers `toy.print` to a loop nest calling `printf` on each of the individual
/// elements of the array.
-class PrintOpLowering : public ConversionPattern {
+class PrintOpLowering : public OpConversionPattern<toy::PrintOp> {
public:
- explicit PrintOpLowering(MLIRContext *context)
- : ConversionPattern(toy::PrintOp::getOperationName(), 1, context) {}
+ using OpConversionPattern<toy::PrintOp>::OpConversionPattern;
LogicalResult
- matchAndRewrite(Operation *op, ArrayRef<Value> operands,
+ matchAndRewrite(toy::PrintOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
auto *context = rewriter.getContext();
auto memRefType = llvm::cast<MemRefType>((*op->operand_type_begin()));
@@ -108,9 +107,8 @@ public:
}
// Generate a call to printf for the current element of the loop.
- auto printOp = cast<toy::PrintOp>(op);
auto elementLoad =
- memref::LoadOp::create(rewriter, loc, printOp.getInput(), loopIvs);
+ memref::LoadOp::create(rewriter, loc, op.getInput(), loopIvs);
LLVM::CallOp::create(rewriter, loc, getPrintfType(context), printfRef,
ArrayRef<Value>({formatSpecifierCst, elementLoad}));
diff --git a/mlir/examples/toy/Ch6/mlir/ShapeInferencePass.cpp b/mlir/examples/toy/Ch6/mlir/ShapeInferencePass.cpp
index 2522abe..a552e1f0 100644
--- a/mlir/examples/toy/Ch6/mlir/ShapeInferencePass.cpp
+++ b/mlir/examples/toy/Ch6/mlir/ShapeInferencePass.cpp
@@ -23,7 +23,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/Casting.h"
-#include "llvm/Support/Debug.h"
+#include "llvm/Support/DebugLog.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
@@ -81,7 +81,7 @@ struct ShapeInferencePass
opWorklist.erase(op);
// Ask the operation to infer its output shapes.
- LLVM_DEBUG(llvm::dbgs() << "Inferring shape for: " << *op << "\n");
+ LDBG() << "Inferring shape for: " << *op;
if (auto shapeOp = dyn_cast<ShapeInference>(op)) {
shapeOp.inferShapes();
} else {
diff --git a/mlir/examples/toy/Ch6/toyc.cpp b/mlir/examples/toy/Ch6/toyc.cpp
index dccab91..4a5e109 100644
--- a/mlir/examples/toy/Ch6/toyc.cpp
+++ b/mlir/examples/toy/Ch6/toyc.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "mlir/Dialect/Func/Extensions/AllExtensions.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/LLVMIR/Transforms/InlinerInterfaceImpl.h"
#include "toy/AST.h"
diff --git a/mlir/examples/toy/Ch7/CMakeLists.txt b/mlir/examples/toy/Ch7/CMakeLists.txt
index 362ab51..a489ae5 100644
--- a/mlir/examples/toy/Ch7/CMakeLists.txt
+++ b/mlir/examples/toy/Ch7/CMakeLists.txt
@@ -36,14 +36,8 @@ add_toy_chapter(toyc-ch7
include_directories(${CMAKE_CURRENT_BINARY_DIR})
include_directories(${CMAKE_CURRENT_BINARY_DIR}/include/)
-get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS)
-get_property(conversion_libs GLOBAL PROPERTY MLIR_CONVERSION_LIBS)
-get_property(extension_libs GLOBAL PROPERTY MLIR_EXTENSION_LIBS)
target_link_libraries(toyc-ch7
PRIVATE
- ${dialect_libs}
- ${conversion_libs}
- ${extension_libs}
MLIRAnalysis
MLIRBuiltinToLLVMIRTranslation
MLIRCallInterfaces
@@ -56,7 +50,10 @@ target_link_libraries(toyc-ch7
MLIRMemRefDialect
MLIRParser
MLIRPass
+ MLIRRegisterAllDialects
+ MLIRRegisterAllExtensions
+ MLIRRegisterAllPasses
MLIRSideEffectInterfaces
MLIRTargetLLVMIRExport
MLIRTransforms
- )
+ )
diff --git a/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp b/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
index d65c89c..cbe4236 100644
--- a/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
+++ b/mlir/examples/toy/Ch7/mlir/LowerToAffineLoops.cpp
@@ -44,7 +44,7 @@
using namespace mlir;
//===----------------------------------------------------------------------===//
-// ToyToAffine RewritePatterns
+// ToyToAffine Conversion Patterns
//===----------------------------------------------------------------------===//
/// Convert the given RankedTensorType into the corresponding MemRefType.
@@ -69,15 +69,13 @@ static Value insertAllocAndDealloc(MemRefType type, Location loc,
}
/// This defines the function type used to process an iteration of a lowered
-/// loop. It takes as input an OpBuilder, an range of memRefOperands
-/// corresponding to the operands of the input operation, and the range of loop
-/// induction variables for the iteration. It returns a value to store at the
-/// current index of the iteration.
-using LoopIterationFn = function_ref<Value(
- OpBuilder &rewriter, ValueRange memRefOperands, ValueRange loopIvs)>;
-
-static void lowerOpToLoops(Operation *op, ValueRange operands,
- PatternRewriter &rewriter,
+/// loop. It takes as input an OpBuilder and the range of loop induction
+/// variables for the iteration. It returns a value to store at the current
+/// index of the iteration.
+using LoopIterationFn =
+ function_ref<Value(OpBuilder &rewriter, ValueRange loopIvs)>;
+
+static void lowerOpToLoops(Operation *op, PatternRewriter &rewriter,
LoopIterationFn processIteration) {
auto tensorType = llvm::cast<RankedTensorType>((*op->result_type_begin()));
auto loc = op->getLoc();
@@ -95,10 +93,10 @@ static void lowerOpToLoops(Operation *op, ValueRange operands,
affine::buildAffineLoopNest(
rewriter, loc, lowerBounds, tensorType.getShape(), steps,
[&](OpBuilder &nestedBuilder, Location loc, ValueRange ivs) {
- // Call the processing function with the rewriter, the memref operands,
+ // Call the processing function with the rewriter
// and the loop induction variables. This function will return the value
// to store at the current index.
- Value valueToStore = processIteration(nestedBuilder, operands, ivs);
+ Value valueToStore = processIteration(nestedBuilder, ivs);
affine::AffineStoreOp::create(nestedBuilder, loc, valueToStore, alloc,
ivs);
});
@@ -109,38 +107,30 @@ static void lowerOpToLoops(Operation *op, ValueRange operands,
namespace {
//===----------------------------------------------------------------------===//
-// ToyToAffine RewritePatterns: Binary operations
+// ToyToAffine Conversion Patterns: Binary operations
//===----------------------------------------------------------------------===//
template <typename BinaryOp, typename LoweredBinaryOp>
-struct BinaryOpLowering : public ConversionPattern {
- BinaryOpLowering(MLIRContext *ctx)
- : ConversionPattern(BinaryOp::getOperationName(), 1, ctx) {}
+struct BinaryOpLowering : public OpConversionPattern<BinaryOp> {
+ using OpConversionPattern<BinaryOp>::OpConversionPattern;
+ using OpAdaptor = typename OpConversionPattern<BinaryOp>::OpAdaptor;
LogicalResult
- matchAndRewrite(Operation *op, ArrayRef<Value> operands,
+ matchAndRewrite(BinaryOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const final {
auto loc = op->getLoc();
- lowerOpToLoops(op, operands, rewriter,
- [loc](OpBuilder &builder, ValueRange memRefOperands,
- ValueRange loopIvs) {
- // Generate an adaptor for the remapped operands of the
- // BinaryOp. This allows for using the nice named accessors
- // that are generated by the ODS.
- typename BinaryOp::Adaptor binaryAdaptor(memRefOperands);
-
- // Generate loads for the element of 'lhs' and 'rhs' at the
- // inner loop.
- auto loadedLhs = affine::AffineLoadOp::create(
- builder, loc, binaryAdaptor.getLhs(), loopIvs);
- auto loadedRhs = affine::AffineLoadOp::create(
- builder, loc, binaryAdaptor.getRhs(), loopIvs);
-
- // Create the binary operation performed on the loaded
- // values.
- return LoweredBinaryOp::create(builder, loc, loadedLhs,
- loadedRhs);
- });
+ lowerOpToLoops(op, rewriter, [&](OpBuilder &builder, ValueRange loopIvs) {
+ // Generate loads for the element of 'lhs' and 'rhs' at the
+ // inner loop.
+ auto loadedLhs =
+ affine::AffineLoadOp::create(builder, loc, adaptor.getLhs(), loopIvs);
+ auto loadedRhs =
+ affine::AffineLoadOp::create(builder, loc, adaptor.getRhs(), loopIvs);
+
+ // Create the binary operation performed on the loaded
+ // values.
+ return LoweredBinaryOp::create(builder, loc, loadedLhs, loadedRhs);
+ });
return success();
}
};
@@ -148,14 +138,15 @@ using AddOpLowering = BinaryOpLowering<toy::AddOp, arith::AddFOp>;
using MulOpLowering = BinaryOpLowering<toy::MulOp, arith::MulFOp>;
//===----------------------------------------------------------------------===//
-// ToyToAffine RewritePatterns: Constant operations
+// ToyToAffine Conversion Patterns: Constant operations
//===----------------------------------------------------------------------===//
-struct ConstantOpLowering : public OpRewritePattern<toy::ConstantOp> {
- using OpRewritePattern<toy::ConstantOp>::OpRewritePattern;
+struct ConstantOpLowering : public OpConversionPattern<toy::ConstantOp> {
+ using OpConversionPattern<toy::ConstantOp>::OpConversionPattern;
- LogicalResult matchAndRewrite(toy::ConstantOp op,
- PatternRewriter &rewriter) const final {
+ LogicalResult
+ matchAndRewrite(toy::ConstantOp op, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const final {
DenseElementsAttr constantValue = op.getValue();
Location loc = op.getLoc();
@@ -216,7 +207,7 @@ struct ConstantOpLowering : public OpRewritePattern<toy::ConstantOp> {
};
//===----------------------------------------------------------------------===//
-// ToyToAffine RewritePatterns: Func operations
+// ToyToAffine Conversion Patterns: Func operations
//===----------------------------------------------------------------------===//
struct FuncOpLowering : public OpConversionPattern<toy::FuncOp> {
@@ -247,7 +238,7 @@ struct FuncOpLowering : public OpConversionPattern<toy::FuncOp> {
};
//===----------------------------------------------------------------------===//
-// ToyToAffine RewritePatterns: Print operations
+// ToyToAffine Conversion Patterns: Print operations
//===----------------------------------------------------------------------===//
struct PrintOpLowering : public OpConversionPattern<toy::PrintOp> {
@@ -265,14 +256,15 @@ struct PrintOpLowering : public OpConversionPattern<toy::PrintOp> {
};
//===----------------------------------------------------------------------===//
-// ToyToAffine RewritePatterns: Return operations
+// ToyToAffine Conversion Patterns: Return operations
//===----------------------------------------------------------------------===//
-struct ReturnOpLowering : public OpRewritePattern<toy::ReturnOp> {
- using OpRewritePattern<toy::ReturnOp>::OpRewritePattern;
+struct ReturnOpLowering : public OpConversionPattern<toy::ReturnOp> {
+ using OpConversionPattern<toy::ReturnOp>::OpConversionPattern;
- LogicalResult matchAndRewrite(toy::ReturnOp op,
- PatternRewriter &rewriter) const final {
+ LogicalResult
+ matchAndRewrite(toy::ReturnOp op, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const final {
// During this lowering, we expect that all function calls have been
// inlined.
if (op.hasOperand())
@@ -285,32 +277,24 @@ struct ReturnOpLowering : public OpRewritePattern<toy::ReturnOp> {
};
//===----------------------------------------------------------------------===//
-// ToyToAffine RewritePatterns: Transpose operations
+// ToyToAffine Conversion Patterns: Transpose operations
//===----------------------------------------------------------------------===//
-struct TransposeOpLowering : public ConversionPattern {
- TransposeOpLowering(MLIRContext *ctx)
- : ConversionPattern(toy::TransposeOp::getOperationName(), 1, ctx) {}
+struct TransposeOpLowering : public OpConversionPattern<toy::TransposeOp> {
+ using OpConversionPattern<toy::TransposeOp>::OpConversionPattern;
LogicalResult
- matchAndRewrite(Operation *op, ArrayRef<Value> operands,
+ matchAndRewrite(toy::TransposeOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const final {
auto loc = op->getLoc();
- lowerOpToLoops(op, operands, rewriter,
- [loc](OpBuilder &builder, ValueRange memRefOperands,
- ValueRange loopIvs) {
- // Generate an adaptor for the remapped operands of the
- // TransposeOp. This allows for using the nice named
- // accessors that are generated by the ODS.
- toy::TransposeOpAdaptor transposeAdaptor(memRefOperands);
- Value input = transposeAdaptor.getInput();
-
- // Transpose the elements by generating a load from the
- // reverse indices.
- SmallVector<Value, 2> reverseIvs(llvm::reverse(loopIvs));
- return affine::AffineLoadOp::create(builder, loc, input,
- reverseIvs);
- });
+ lowerOpToLoops(op, rewriter, [&](OpBuilder &builder, ValueRange loopIvs) {
+ Value input = adaptor.getInput();
+
+ // Transpose the elements by generating a load from the
+ // reverse indices.
+ SmallVector<Value, 2> reverseIvs(llvm::reverse(loopIvs));
+ return affine::AffineLoadOp::create(builder, loc, input, reverseIvs);
+ });
return success();
}
};
diff --git a/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp b/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp
index 43a84da..8b48a8f 100644
--- a/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp
+++ b/mlir/examples/toy/Ch7/mlir/LowerToLLVM.cpp
@@ -55,19 +55,18 @@
using namespace mlir;
//===----------------------------------------------------------------------===//
-// ToyToLLVM RewritePatterns
+// ToyToLLVM Conversion Patterns
//===----------------------------------------------------------------------===//
namespace {
/// Lowers `toy.print` to a loop nest calling `printf` on each of the individual
/// elements of the array.
-class PrintOpLowering : public ConversionPattern {
+class PrintOpLowering : public OpConversionPattern<toy::PrintOp> {
public:
- explicit PrintOpLowering(MLIRContext *context)
- : ConversionPattern(toy::PrintOp::getOperationName(), 1, context) {}
+ using OpConversionPattern<toy::PrintOp>::OpConversionPattern;
LogicalResult
- matchAndRewrite(Operation *op, ArrayRef<Value> operands,
+ matchAndRewrite(toy::PrintOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
auto *context = rewriter.getContext();
auto memRefType = llvm::cast<MemRefType>((*op->operand_type_begin()));
@@ -108,9 +107,8 @@ public:
}
// Generate a call to printf for the current element of the loop.
- auto printOp = cast<toy::PrintOp>(op);
auto elementLoad =
- memref::LoadOp::create(rewriter, loc, printOp.getInput(), loopIvs);
+ memref::LoadOp::create(rewriter, loc, op.getInput(), loopIvs);
LLVM::CallOp::create(rewriter, loc, getPrintfType(context), printfRef,
ArrayRef<Value>({formatSpecifierCst, elementLoad}));
diff --git a/mlir/examples/toy/Ch7/mlir/ShapeInferencePass.cpp b/mlir/examples/toy/Ch7/mlir/ShapeInferencePass.cpp
index 2522abe..a552e1f0 100644
--- a/mlir/examples/toy/Ch7/mlir/ShapeInferencePass.cpp
+++ b/mlir/examples/toy/Ch7/mlir/ShapeInferencePass.cpp
@@ -23,7 +23,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/Casting.h"
-#include "llvm/Support/Debug.h"
+#include "llvm/Support/DebugLog.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
@@ -81,7 +81,7 @@ struct ShapeInferencePass
opWorklist.erase(op);
// Ask the operation to infer its output shapes.
- LLVM_DEBUG(llvm::dbgs() << "Inferring shape for: " << *op << "\n");
+ LDBG() << "Inferring shape for: " << *op;
if (auto shapeOp = dyn_cast<ShapeInference>(op)) {
shapeOp.inferShapes();
} else {
diff --git a/mlir/examples/toy/Ch7/toyc.cpp b/mlir/examples/toy/Ch7/toyc.cpp
index dd86265..32208ecca 100644
--- a/mlir/examples/toy/Ch7/toyc.cpp
+++ b/mlir/examples/toy/Ch7/toyc.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "mlir/Dialect/Func/Extensions/AllExtensions.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/LLVMIR/Transforms/InlinerInterfaceImpl.h"
#include "toy/AST.h"
diff --git a/mlir/examples/transform-opt/CMakeLists.txt b/mlir/examples/transform-opt/CMakeLists.txt
index 8e23555..07d58f6 100644
--- a/mlir/examples/transform-opt/CMakeLists.txt
+++ b/mlir/examples/transform-opt/CMakeLists.txt
@@ -1,18 +1,14 @@
-get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS)
-get_property(conversion_libs GLOBAL PROPERTY MLIR_CONVERSION_LIBS)
-get_property(extension_libs GLOBAL PROPERTY MLIR_EXTENSION_LIBS)
-
set(LIBS
MLIRAnalysis
MLIRIR
MLIRParser
+ MLIRRegisterAllDialects
+ MLIRRegisterAllExtensions
+ MLIRRegisterAllPasses
MLIRSupport
MLIRTransformDialect
MLIRTransformDialectTransforms
MLIRTransforms
- ${dialect_libs}
- ${conversion_libs}
- ${extension_libs}
)
add_mlir_tool(mlir-transform-opt
diff --git a/mlir/examples/transform-opt/mlir-transform-opt.cpp b/mlir/examples/transform-opt/mlir-transform-opt.cpp
index 1a29913..4b12e76 100644
--- a/mlir/examples/transform-opt/mlir-transform-opt.cpp
+++ b/mlir/examples/transform-opt/mlir-transform-opt.cpp
@@ -22,6 +22,7 @@
#include "mlir/Tools/mlir-opt/MlirOptMain.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/InitLLVM.h"
+#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/ToolOutputFile.h"
#include <cstdlib>
diff --git a/mlir/examples/transform/Ch4/lib/MyExtension.cpp b/mlir/examples/transform/Ch4/lib/MyExtension.cpp
index fa0ffc9..2159483 100644
--- a/mlir/examples/transform/Ch4/lib/MyExtension.cpp
+++ b/mlir/examples/transform/Ch4/lib/MyExtension.cpp
@@ -13,11 +13,9 @@
#include "MyExtension.h"
#include "mlir/Dialect/Transform/IR/TransformDialect.h"
-#include "llvm/Support/Debug.h"
+#include "llvm/Support/DebugLog.h"
-#define DEBUG_TYPE_MATCHER "transform-matcher"
-#define DBGS_MATCHER() (llvm::dbgs() << "[" DEBUG_TYPE_MATCHER "] ")
-#define DEBUG_MATCHER(x) DEBUG_WITH_TYPE(DEBUG_TYPE_MATCHER, x)
+#define DEBUG_TYPE "transform-matcher"
#define GET_OP_CLASSES
#include "MyExtension.cpp.inc"
@@ -124,9 +122,8 @@ mlir::transform::HasOperandSatisfyingOp::apply(
// Report failure-to-match for debugging purposes and stop matching this
// operand.
assert(diag.isSilenceableFailure());
- DEBUG_MATCHER(DBGS_MATCHER()
- << "failed to match operand #" << operand.getOperandNumber()
- << ": " << diag.getMessage());
+ LDBG() << "failed to match operand #" << operand.getOperandNumber()
+ << ": " << diag.getMessage();
(void)diag.silence();
matchSucceeded = false;
break;