Diffstat (limited to 'mlir/docs')
-rw-r--r--   mlir/docs/Dialects/Vector.md                |  2
-rw-r--r--   mlir/docs/Dialects/emitc.md                 |  2
-rw-r--r--   mlir/docs/Interfaces.md                     |  4
-rw-r--r--   mlir/docs/PDLL.md                           | 12
-rw-r--r--   mlir/docs/Tutorials/QuickstartRewrites.md   |  6
-rw-r--r--   mlir/docs/Tutorials/Toy/Ch-2.md             |  2
-rw-r--r--   mlir/docs/Tutorials/Toy/Ch-4.md             |  4
-rw-r--r--   mlir/docs/Tutorials/Toy/Ch-5.md             | 57
-rw-r--r--   mlir/docs/Tutorials/Toy/Ch-6.md             |  2
-rw-r--r--   mlir/docs/Tutorials/Toy/Ch-7.md             |  4
-rw-r--r--   mlir/docs/Tutorials/transform/Ch0.md        | 20
-rw-r--r--   mlir/docs/Tutorials/transform/Ch3.md        | 16
12 files changed, 65 insertions, 66 deletions
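The bulk of this patch migrates the documentation's C++ snippets from the `builder.create<OpTy>(...)` member-function spelling to the static `OpTy::create(builder, ...)` spelling. A minimal sketch of the two forms follows; it assumes an MLIR build recent enough to generate the static `create` methods, and the `makeZeroIndex` helper is illustrative only, not code from this patch:

```c++
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Builders.h"

// Illustrative helper (not from this patch): build an `arith.constant 0 : index`.
static mlir::Value makeZeroIndex(mlir::OpBuilder &builder, mlir::Location loc) {
  // Pre-migration spelling, as the docs read before this patch:
  //   return builder.create<mlir::arith::ConstantIndexOp>(loc, 0);
  // Post-migration spelling, as the docs read after this patch:
  return mlir::arith::ConstantIndexOp::create(builder, loc, 0);
}
```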
diff --git a/mlir/docs/Dialects/Vector.md b/mlir/docs/Dialects/Vector.md
index ebeb0a2..6c8949d 100644
--- a/mlir/docs/Dialects/Vector.md
+++ b/mlir/docs/Dialects/Vector.md
@@ -294,7 +294,7 @@ LLVM instructions are prefixed by the `llvm.` dialect prefix (e.g.
 `llvm.insertvalue`). Such ops operate exclusively on 1-D vectors and aggregates
 following the [LLVM LangRef](https://llvm.org/docs/LangRef.html). MLIR
 operations are prefixed by the `vector.` dialect prefix (e.g.
-`vector.insertelement`). Such ops operate exclusively on MLIR `n-D` `vector`
+`vector.insert`). Such ops operate exclusively on MLIR `n-D` `vector`
 types.
 
 ### Alternatives For Lowering an n-D Vector Type to LLVM
diff --git a/mlir/docs/Dialects/emitc.md b/mlir/docs/Dialects/emitc.md
index e2288f5..6d09e93 100644
--- a/mlir/docs/Dialects/emitc.md
+++ b/mlir/docs/Dialects/emitc.md
@@ -18,6 +18,8 @@ The following convention is followed:
   GCC or Clang.
 * If `emitc.array` with a dimension of size zero is used, then the code
   requires [a GCC extension](https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html).
+* If `aligned_alloc` is passed to an `emitc.call_opaque` operation, then C++17
+  or C11 is required.
 * Else the generated code is compatible with C99.
 
 These restrictions are neither inherent to the EmitC dialect itself nor to the
diff --git a/mlir/docs/Interfaces.md b/mlir/docs/Interfaces.md
index bf590ac..7e1c5fe 100644
--- a/mlir/docs/Interfaces.md
+++ b/mlir/docs/Interfaces.md
@@ -563,7 +563,7 @@ def MyInterface : OpInterface<"MyInterface"> {
       template <typename ConcreteOp>
       struct Model : public Concept {
         Operation *create(OpBuilder &builder, Location loc) const override {
-          return builder.create<ConcreteOp>(loc);
+          return ConcreteOp::create(builder, loc);
         }
       }
     };
@@ -574,7 +574,7 @@ def MyInterface : OpInterface<"MyInterface"> {
       }],
       "Operation *", "create", (ins "OpBuilder &":$builder, "Location":$loc),
       /*methodBody=*/[{
-        return builder.create<ConcreteOp>(loc);
+        return ConcreteOp::create(builder, loc);
       }]>,
 
     InterfaceMethod<[{
diff --git a/mlir/docs/PDLL.md b/mlir/docs/PDLL.md
index 9839d1d..c6e352f 100644
--- a/mlir/docs/PDLL.md
+++ b/mlir/docs/PDLL.md
@@ -1483,7 +1483,7 @@ be defined by specifying a string code block after the rewrite declaration:
 
 ```pdll
 Rewrite BuildOp(value: Value) -> (foo: Op<my_dialect.foo>, bar: Op<my_dialect.bar>) [{
-  return {rewriter.create<my_dialect::FooOp>(value), rewriter.create<my_dialect::BarOp>()};
+  return {my_dialect::FooOp::create(rewriter, value), my_dialect::BarOp::create(rewriter)};
 }];
 
 Pattern {
@@ -1508,7 +1508,7 @@ translated into:
 
 ```c++
 std::tuple<my_dialect::FooOp, my_dialect::BarOp> BuildOp(Value value) {
-  return {rewriter.create<my_dialect::FooOp>(value), rewriter.create<my_dialect::BarOp>()};
+  return {my_dialect::FooOp::create(rewriter, value), my_dialect::BarOp::create(rewriter)};
 }
 ```
 
@@ -1530,7 +1530,7 @@ below describes the various result translation scenarios:
 
 ```pdll
 Rewrite createOp() [{
-  rewriter.create<my_dialect::FooOp>();
+  my_dialect::FooOp::create(rewriter);
 }];
 ```
 
@@ -1538,7 +1538,7 @@ In the case where a native `Rewrite` has no results, the native function returns
 
 ```c++
 void createOp(PatternRewriter &rewriter) {
-  rewriter.create<my_dialect::FooOp>();
+  my_dialect::FooOp::create(rewriter);
 }
 ```
 
@@ -1546,7 +1546,7 @@ void createOp(PatternRewriter &rewriter) {
 
 ```pdll
 Rewrite createOp() -> Op<my_dialect.foo> [{
-  return rewriter.create<my_dialect::FooOp>();
+  return my_dialect::FooOp::create(rewriter);
 }];
 ```
 
@@ -1555,7 +1555,7 @@ native type for that single result:
 
 ```c++
 my_dialect::FooOp createOp(PatternRewriter &rewriter) {
-  return rewriter.create<my_dialect::FooOp>();
+  return my_dialect::FooOp::create(rewriter);
 }
 ```
 
diff --git a/mlir/docs/Tutorials/QuickstartRewrites.md b/mlir/docs/Tutorials/QuickstartRewrites.md
index 0c89065..cbb6f03 100644
--- a/mlir/docs/Tutorials/QuickstartRewrites.md
+++ b/mlir/docs/Tutorials/QuickstartRewrites.md
@@ -130,7 +130,7 @@ def : Pat<(TF_LeakyReluOp:$old_value, $arg, F32Attr:$a),
 ```c++
 static Value createTFLLeakyRelu(PatternRewriter &rewriter, Operation *op,
                                 Value operand, Attribute attr) {
-  return rewriter.create<mlir::TFL::LeakyReluOp>(
+  return mlir::TFL::LeakyReluOp::create(rewriter,
       op->getLoc(), operands[0].getType(), /*arg=*/operands[0],
       /*alpha=*/cast<FloatAttr>(attrs[0]));
 }
@@ -194,10 +194,10 @@ LogicalResult circt::MulOp::canonicalize(MulOp op, PatternRewriter &rewriter) {
   // mul(x, c) -> shl(x, log2(c)), where c is a power of two.
   if (inputs.size() == 2 && matchPattern(inputs.back(), m_RConstant(value)) &&
       value.isPowerOf2()) {
-    auto shift = rewriter.create<rtl::ConstantOp>(op.getLoc(), op.getType(),
+    auto shift = rtl::ConstantOp::create(rewriter, op.getLoc(), op.getType(),
                                                   value.exactLogBase2());
     auto shlOp =
-        rewriter.create<comb::ShlOp>(op.getLoc(), inputs[0], shift);
+        comb::ShlOp::create(rewriter, op.getLoc(), inputs[0], shift);
     rewriter.replaceOpWithNewOp<MulOp>(op, op.getType(),
                                        ArrayRef<Value>(shlOp));
     return success();
diff --git a/mlir/docs/Tutorials/Toy/Ch-2.md b/mlir/docs/Tutorials/Toy/Ch-2.md
index 039417c..81e4161 100644
--- a/mlir/docs/Tutorials/Toy/Ch-2.md
+++ b/mlir/docs/Tutorials/Toy/Ch-2.md
@@ -521,7 +521,7 @@ def ConstantOp : Toy_Op<"constant"> {
 
   // Add custom build methods for the constant operation. These methods populate
   // the `state` that MLIR uses to create operations, i.e. these are used when
-  // using `builder.create<ConstantOp>(...)`.
+  // using `ConstantOp::create(builder, ...)`.
   let builders = [
     // Build a constant with a given constant tensor value.
     OpBuilder<(ins "DenseElementsAttr":$value), [{
diff --git a/mlir/docs/Tutorials/Toy/Ch-4.md b/mlir/docs/Tutorials/Toy/Ch-4.md
index 1275d36..e9abe36 100644
--- a/mlir/docs/Tutorials/Toy/Ch-4.md
+++ b/mlir/docs/Tutorials/Toy/Ch-4.md
@@ -300,7 +300,7 @@ struct ToyInlinerInterface : public DialectInlinerInterface {
   Operation *materializeCallConversion(OpBuilder &builder, Value input,
                                        Type resultType,
                                        Location conversionLoc) const final {
-    return builder.create<CastOp>(conversionLoc, resultType, input);
+    return CastOp::create(builder, conversionLoc, resultType, input);
   }
 };
 ```
@@ -445,7 +445,7 @@ When processing an operation like described, we query if it registered the
 
 ```c++
   // Ask the operation to infer its output shapes.
-  LLVM_DEBUG(llvm::dbgs() << "Inferring shape for: " << *op << "\n");
+  LDBG() << "Inferring shape for: " << *op;
 
   /// We check if an operation has a particular interface by casting.
   if (ShapeInference shapeOp = dyn_cast<ShapeInference>(op)) {
diff --git a/mlir/docs/Tutorials/Toy/Ch-5.md b/mlir/docs/Tutorials/Toy/Ch-5.md
index d483cd8..17cd6bb 100644
--- a/mlir/docs/Tutorials/Toy/Ch-5.md
+++ b/mlir/docs/Tutorials/Toy/Ch-5.md
@@ -91,13 +91,11 @@ doesn't matter. See `ConversionTarget::getOpInfo` for the details.
 After the conversion target has been defined, we can define how to convert the
 *illegal* operations into *legal* ones. Similarly to the canonicalization
 framework introduced in [chapter 3](Ch-3.md), the
-[`DialectConversion` framework](../../DialectConversion.md) also uses
-[RewritePatterns](../QuickstartRewrites.md) to perform the conversion logic.
-These patterns may be the `RewritePatterns` seen before or a new type of pattern
-specific to the conversion framework `ConversionPattern`. `ConversionPatterns`
+[`DialectConversion` framework](../../DialectConversion.md) uses a special kind
+of `ConversionPattern` to perform the conversion logic. `ConversionPatterns`
 are different from traditional `RewritePatterns` in that they accept an
-additional `operands` parameter containing operands that have been
-remapped/replaced. This is used when dealing with type conversions, as the
+additional `operands` (or `adaptor`) parameter containing operands that have
+been remapped/replaced. This is used when dealing with type conversions, as the
 pattern will want to operate on values of the new type but match against the
 old. For our lowering, this invariant will be useful as it translates from the
 [TensorType](../../Dialects/Builtin.md/#rankedtensortype) currently being
@@ -106,38 +104,23 @@ look at a snippet of lowering the `toy.transpose` operation:
 
 ```c++
 /// Lower the `toy.transpose` operation to an affine loop nest.
-struct TransposeOpLowering : public mlir::ConversionPattern {
-  TransposeOpLowering(mlir::MLIRContext *ctx)
-      : mlir::ConversionPattern(TransposeOp::getOperationName(), 1, ctx) {}
-
-  /// Match and rewrite the given `toy.transpose` operation, with the given
-  /// operands that have been remapped from `tensor<...>` to `memref<...>`.
-  llvm::LogicalResult
-  matchAndRewrite(mlir::Operation *op, ArrayRef<mlir::Value> operands,
-                  mlir::ConversionPatternRewriter &rewriter) const final {
-    auto loc = op->getLoc();
+struct TransposeOpLowering : public OpConversionPattern<toy::TransposeOp> {
+  using OpConversionPattern<toy::TransposeOp>::OpConversionPattern;
 
-    // Call to a helper function that will lower the current operation to a set
-    // of affine loops. We provide a functor that operates on the remapped
-    // operands, as well as the loop induction variables for the inner most
-    // loop body.
-    lowerOpToLoops(
-        op, operands, rewriter,
-        [loc](mlir::PatternRewriter &rewriter,
-              ArrayRef<mlir::Value> memRefOperands,
-              ArrayRef<mlir::Value> loopIvs) {
-          // Generate an adaptor for the remapped operands of the TransposeOp.
-          // This allows for using the nice named accessors that are generated
-          // by the ODS. This adaptor is automatically provided by the ODS
-          // framework.
-          TransposeOpAdaptor transposeAdaptor(memRefOperands);
-          mlir::Value input = transposeAdaptor.input();
-
-          // Transpose the elements by generating a load from the reverse
-          // indices.
-          SmallVector<mlir::Value, 2> reverseIvs(llvm::reverse(loopIvs));
-          return rewriter.create<mlir::AffineLoadOp>(loc, input, reverseIvs);
-        });
+  LogicalResult
+  matchAndRewrite(toy::TransposeOp op, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const final {
+    auto loc = op->getLoc();
+    lowerOpToLoops(op, rewriter,
+                   [&](OpBuilder &builder, ValueRange loopIvs) {
+                     Value input = adaptor.getInput();
+
+                     // Transpose the elements by generating a load from the
+                     // reverse indices.
+                     SmallVector<Value, 2> reverseIvs(llvm::reverse(loopIvs));
+                     return affine::AffineLoadOp::create(builder, loc, input,
+                                                         reverseIvs);
+                   });
     return success();
   }
 };
diff --git a/mlir/docs/Tutorials/Toy/Ch-6.md b/mlir/docs/Tutorials/Toy/Ch-6.md
index e8a68b5..529de55 100644
--- a/mlir/docs/Tutorials/Toy/Ch-6.md
+++ b/mlir/docs/Tutorials/Toy/Ch-6.md
@@ -47,7 +47,7 @@ static FlatSymbolRefAttr getOrInsertPrintf(PatternRewriter &rewriter,
   // Insert the printf function into the body of the parent module.
   PatternRewriter::InsertionGuard insertGuard(rewriter);
   rewriter.setInsertionPointToStart(module.getBody());
-  rewriter.create<LLVM::LLVMFuncOp>(module.getLoc(), "printf", llvmFnType);
+  LLVM::LLVMFuncOp::create(rewriter, module.getLoc(), "printf", llvmFnType);
   return SymbolRefAttr::get("printf", context);
 }
 ```
diff --git a/mlir/docs/Tutorials/Toy/Ch-7.md b/mlir/docs/Tutorials/Toy/Ch-7.md
index dce3490..0f50c49 100644
--- a/mlir/docs/Tutorials/Toy/Ch-7.md
+++ b/mlir/docs/Tutorials/Toy/Ch-7.md
@@ -488,9 +488,9 @@ mlir::Operation *ToyDialect::materializeConstant(mlir::OpBuilder &builder,
                                                  mlir::Type type,
                                                  mlir::Location loc) {
   if (isa<StructType>(type))
-    return builder.create<StructConstantOp>(loc, type,
+    return StructConstantOp::create(builder, loc, type,
                                             cast<mlir::ArrayAttr>(value));
-  return builder.create<ConstantOp>(loc, type,
+  return ConstantOp::create(builder, loc, type,
                                     cast<mlir::DenseElementsAttr>(value));
 }
 ```
diff --git a/mlir/docs/Tutorials/transform/Ch0.md b/mlir/docs/Tutorials/transform/Ch0.md
index ac3989a..dc4b753 100644
--- a/mlir/docs/Tutorials/transform/Ch0.md
+++ b/mlir/docs/Tutorials/transform/Ch0.md
@@ -46,7 +46,7 @@ When no support is available, such an operation can be transformed into a loop:
 %c8 = arith.constant 8 : index
 %init = arith.constant 0.0 : f32
 %result = scf.for %i = %c0 to %c8 step %c1 iter_args(%partial = %init) -> (f32) {
-  %element = vector.extractelement %0[%i : index] : vector<8xf32>
+  %element = vector.extract %0[%i] : f32 into vector<8xf32>
   %updated = arith.addf %partial, %element : f32
   scf.yield %updated : f32
 }
@@ -145,7 +145,7 @@ linalg.generic {
   %c0 = arith.constant 0.0 : f32
   %0 = arith.cmpf ogt %in_one, %c0 : f32
   %1 = arith.select %0, %in_one, %c0 : f32
-  linalg.yield %1 : f32 
+  linalg.yield %1 : f32
 }
 ```
@@ -185,7 +185,7 @@ In the case of `linalg.generic` operations, the iteration space is implicit and
 For example, tiling the matrix multiplication presented above with tile sizes `(2, 8)`, we obtain a loop nest around a `linalg.generic` expressing the same operation on a `2x8` tensor.
 
 ```mlir
-// A special "multi-for" loop that supports tensor-insertion semantics 
+// A special "multi-for" loop that supports tensor-insertion semantics
 // as opposed to implicit updates. The resulting 8x16 tensor will be produced
 // by this loop.
 // The trip count of iterators is computed dividing the original tensor size,
@@ -202,9 +202,9 @@ For example, tiling the matrix multiplication presented above with tile sizes `(
   // Take slices of inputs and outputs. Only the "i" and "j" dimensions are sliced.
   %lhs_slice = tensor.extract_slice %lhs[%3, 0] [2, 10] [1, 1]
     : tensor<8x10xf32> to tensor<2x10xf32>
-  %rhs_slice = tensor.extract_slice %rhs[0, %4] [10, 8] [1, 1] 
+  %rhs_slice = tensor.extract_slice %rhs[0, %4] [10, 8] [1, 1]
    : tensor<10x16xf32> to tensor<10x8xf32>
-  %result_slice = tensor.extract_slice %shared[%3, %4] [2, 8] [1, 1] 
+  %result_slice = tensor.extract_slice %shared[%3, %4] [2, 8] [1, 1]
    : tensor<8x16xf32> to tensor<2x8xf32>
 
   // This is exactly the same operation as before, but now operating on smaller
@@ -214,7 +214,7 @@ For example, tiling the matrix multiplication presented above with tile sizes `(
                      affine_map<(i, j, k) -> (k, j)>,
                      affine_map<(i, j, k) -> (i, j)>],
     iterator_types = ["parallel", "parallel", "reduction"]
-  } ins(%lhs_slice, %rhs_slice : tensor<2x10xf32>, tensor<10x8xf32>) 
+  } ins(%lhs_slice, %rhs_slice : tensor<2x10xf32>, tensor<10x8xf32>)
     outs(%result_slice : tensor<2x8xf32>) -> tensor<2x8xf32> {
   ^bb0(%lhs_one: f32, %rhs_one: f32, %init_one: f32):
     %0 = arith.mulf %lhs_one, %rhs_one : f32
@@ -238,15 +238,15 @@ After materializing loops with tiling, another key code generation transformatio
 1. the subset (slice) of the operand that is used by the tile, and
 2. the tensor-level structured operation producing the whole tensor that is being sliced.
 
-By inverting the `indexing_map` and applying it to the set of elements accessed through the slice, we can compute the part of the iteration space of the operation defining the full tensor necessary to compute the tile. Thus fusion boils down to replacing the `tensor.extract_slice` operation with the tile of the `linalg.generic` producing the original operand. 
+By inverting the `indexing_map` and applying it to the set of elements accessed through the slice, we can compute the part of the iteration space of the operation defining the full tensor necessary to compute the tile. Thus fusion boils down to replacing the `tensor.extract_slice` operation with the tile of the `linalg.generic` producing the original operand.
 
 Let us assume that the matrix multiplication operation is followed by another operation that multiplies each element of the resulting matrix with itself. This trailing elementwise operation has a 2D iteration space, unlike the 3D one in matrix multiplication. Nevertheless, it is possible to tile the trailing operation and then fuse the producer of its operand, the matmul, into the loop generated by tiling. The untiled dimension will be used in its entirety.
 
 ```mlir
 // Same loop as before.
-%0 = scf.forall (%i, %j) in (4, 2) 
-   shared_outs(%shared = %init) 
+%0 = scf.forall (%i, %j) in (4, 2)
+   shared_outs(%shared = %init)
    -> (tensor<8x16xf32>, tensor<8x16xf32>) {
   // Scale the loop induction variables by the tile sizes.
   %1 = affine.apply affine_map<(d0) -> (d0 * 2)>(%i)
@@ -286,7 +286,7 @@ Let us assume that the matrix multiplication operation is followed by another op
     indexing_maps = [affine_map<(i, j) -> (i, j)>,
                      affine_map<(i, j) -> (i, j)>],
     iterator_types = ["parallel", "parallel"]
-  } ins(%partial : tensor<2x8xf32>) 
+  } ins(%partial : tensor<2x8xf32>)
     outs(%shared_slice : tensor<2x8xf32>) {
   ^bb0(%in: f32, %out: f32):
     %5 = arith.mulf %in, %in : f32
diff --git a/mlir/docs/Tutorials/transform/Ch3.md b/mlir/docs/Tutorials/transform/Ch3.md
index fa788d1..eeab770 100644
--- a/mlir/docs/Tutorials/transform/Ch3.md
+++ b/mlir/docs/Tutorials/transform/Ch3.md
@@ -139,7 +139,21 @@ void MyExtension::init() {
 ```
 
 This type is now directly available in the Transform dialect and can be used in operations.
 
+In the previous tablegen definition, the type of `$call` must be `Transform_ConcreteOp<“func.call”>`,
+By adding `CallOpInterfaceHandle` as an allowed type for `$call`, the corresponding handle
+is allowed to be to any op implementing the interface.
+```tablegen
+def ChangeCallTargetOp : ... {
+  let arguments = (ins
+    // Allow the handle to be to concrete `func.call` ops as well as any op implementing
+    // the `CallOpInterface`.
+    AnyTypeOf<[Transform_ConcreteOpType<"func.call">, CallOpInterfaceHandle]>:$call,
+    StrAttr:$new_target);
+}
+```
+
+We can then add the following code to `sequence.mlir` and run it with the interpreter.
 ```mlir
 // Cast to our new type.
@@ -172,7 +186,7 @@ def CallToOp : Op<Transform_Dialect, "my.call_to_op",
   let results = (outs TransformHandleTypeInterface:$transformed);
 
   // Provide nice syntax.
-  let assemblyFormat = "$call attr-dict `:` functional-type(inputs, outputs)";
+  let assemblyFormat = "$call attr-dict `:` functional-type(operands, results)";
 
   // Declare the function implementing the interface for a single payload operation.
   let extraClassDeclaration = [{
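The Toy Ch-5 hunk above also switches the tutorial from the type-erased `ConversionPattern` to the typed `OpConversionPattern`, whose adaptor carries the remapped operands. As a compact reference, a sketch of that idiom combined with the new `create` spelling; this `arith.addi` pattern is illustrative only, not code from this patch, and it assumes an MLIR build that generates the static `create` methods:

```c++
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Transforms/DialectConversion.h"

// Sketch: re-create `arith.addi` from its type-converted operands, as a
// conversion pattern might during a type-conversion pass.
struct AddIOpConversion : public mlir::OpConversionPattern<mlir::arith::AddIOp> {
  using OpConversionPattern<mlir::arith::AddIOp>::OpConversionPattern;

  mlir::LogicalResult
  matchAndRewrite(mlir::arith::AddIOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const final {
    // `adaptor` exposes the operands after any type conversion, through the
    // same named accessors that ODS generates on the op itself.
    auto newOp = mlir::arith::AddIOp::create(
        rewriter, op.getLoc(), adaptor.getLhs(), adaptor.getRhs());
    rewriter.replaceOp(op, newOp->getResults());
    return mlir::success();
  }
};
```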