aboutsummaryrefslogtreecommitdiff
path: root/mlir/test
diff options
context:
space:
mode:
authorEhsan Toosi <ehsan.nadjaran_toosi@dfki.de>2020-06-02 18:12:57 +0200
committerEhsan Toosi <ehsan.nadjaran_toosi@dfki.de>2020-06-08 09:25:41 +0200
commit4214031d4337a6b04ae4c28119305182e37c45bc (patch)
treed3c994e83ee1201ad5cbd4d02e6749da7ec4c1d7 /mlir/test
parent1778564f9118e29628a76a9cfea87d76fa83f0e6 (diff)
downloadllvm-4214031d4337a6b04ae4c28119305182e37c45bc.zip
llvm-4214031d4337a6b04ae4c28119305182e37c45bc.tar.gz
llvm-4214031d4337a6b04ae4c28119305182e37c45bc.tar.bz2
[mlir] Introduce allowMemrefFunctionResults for the helper operation converters of buffer placement
This parameter gives developers the freedom to choose their desired function signature conversion when preparing their functions for buffer placement. It is introduced for BufferAssignmentFuncOpConverter, and correspondingly for BufferAssignmentReturnOpConverter and BufferAssignmentCallOpConverter, so that return and call operations are adapted to the selected function signature conversion. If the parameter is set, buffer placement will also refrain from deallocating the returned buffers. Differential Revision: https://reviews.llvm.org/D81137
Diffstat (limited to 'mlir/test')
-rw-r--r--mlir/test/Transforms/buffer-placement-preparation-allowed-memref-results.mlir108
-rw-r--r--mlir/test/Transforms/buffer-placement-preparation.mlir4
-rw-r--r--mlir/test/lib/Transforms/TestBufferPlacement.cpp53
3 files changed, 141 insertions, 24 deletions
diff --git a/mlir/test/Transforms/buffer-placement-preparation-allowed-memref-results.mlir b/mlir/test/Transforms/buffer-placement-preparation-allowed-memref-results.mlir
new file mode 100644
index 0000000..adf6e30
--- /dev/null
+++ b/mlir/test/Transforms/buffer-placement-preparation-allowed-memref-results.mlir
@@ -0,0 +1,108 @@
+// RUN: mlir-opt -test-buffer-placement-preparation-with-allowed-memref-results -split-input-file %s | FileCheck %s -dump-input-on-failure
+
+// Since allowMemrefFunctionResults is enabled for Buffer Placement in this
+// test pass, all tensor typed function results are converted to memref and
+// remain as function results. All memref typed function results will escape
+// from the deallocation phase of Buffer Placement.
+
+// CHECK-LABEL: func @void_function_signature_conversion
+func @void_function_signature_conversion(%arg0: tensor<4x8xf32>) {
+ return
+}
+// CHECK: ({{.*}}: memref<4x8xf32>)
+
+// -----
+
+#map0 = affine_map<(d0) -> (d0)>
+
+// CHECK-LABEL: func @complex_signature_conversion
+func @complex_signature_conversion(%arg0: tensor<5xf32>, %arg1: memref<10xf32>, %arg2: i1, %arg3: f16) -> (i1, tensor<5xf32>, memref<10xf32>, memref<15xf32>, f16) {
+ %0 = alloc() : memref<15xf32>
+ %1 = linalg.generic {
+ args_in = 1 : i64,
+ args_out = 1 : i64,
+ indexing_maps = [#map0, #map0],
+ iterator_types = ["parallel"]
+ } %arg0 {
+ ^bb0(%gen1_arg0: f32):
+ %tmp1 = exp %gen1_arg0 : f32
+ linalg.yield %tmp1 : f32
+ }: tensor<5xf32> -> tensor<5xf32>
+ return %arg2, %1, %arg1, %0, %arg3 : i1, tensor<5xf32>, memref<10xf32>, memref<15xf32>, f16
+}
+// CHECK: (%[[ARG0:.*]]: memref<5xf32>, %[[ARG1:.*]]: memref<10xf32>, %[[ARG2:.*]]: i1, %[[ARG3:.*]]: f16)
+// CHECK-SAME: (i1, memref<5xf32>, memref<10xf32>, memref<15xf32>, f16)
+// CHECK: %[[FIRST_ALLOC:.*]] = alloc()
+// CHECK: %[[LINALG_ALLOC:.*]] = alloc()
+// CHECK: return %[[ARG2]], %[[LINALG_ALLOC]], %[[ARG1]], %[[FIRST_ALLOC]], %[[ARG3]]
+
+// -----
+
+// CHECK-LABEL: func @no_signature_conversion_is_needed
+func @no_signature_conversion_is_needed(%arg0: memref<4x8xf32>) {
+ return
+}
+// CHECK: ({{.*}}: memref<4x8xf32>)
+
+// -----
+
+// CHECK-LABEL: func @no_signature_conversion_is_needed
+func @no_signature_conversion_is_needed(%arg0: i1, %arg1: f16) -> (i1, f16){
+ return %arg0, %arg1 : i1, f16
+}
+// CHECK: (%[[ARG0:.*]]: i1, %[[ARG1:.*]]: f16) -> (i1, f16)
+// CHECK: return %[[ARG0]], %[[ARG1]]
+
+// -----
+
+// CHECK-LABEL: func @simple_signature_conversion
+func @simple_signature_conversion(%arg0: tensor<4x8xf32>) -> tensor<4x8xf32> {
+ return %arg0 : tensor<4x8xf32>
+}
+// CHECK: (%[[ARG0:.*]]: [[TYPE:.*]]<[[RANK:.*]]>) -> [[TYPE]]<[[RANK]]>
+// CHECK-NEXT: return %[[ARG0]]
+
+// -----
+
+// CHECK-LABEL: func @func_and_block_signature_conversion
+func @func_and_block_signature_conversion(%arg0 : tensor<2xf32>, %cond : i1, %arg1: tensor<4x4xf32>) -> tensor<4x4xf32>{
+ cond_br %cond, ^bb1, ^bb2
+ ^bb1:
+ br ^exit(%arg0 : tensor<2xf32>)
+ ^bb2:
+ br ^exit(%arg0 : tensor<2xf32>)
+ ^exit(%arg2: tensor<2xf32>):
+ return %arg1 : tensor<4x4xf32>
+}
+// CHECK: (%[[ARG0:.*]]: [[ARG0_TYPE:.*]], %[[COND:.*]]: i1, %[[ARG1:.*]]: [[ARG1_TYPE:.*]]) -> [[RESULT_TYPE:.*]]
+// CHECK: br ^[[EXIT_BLOCK:.*]](%[[ARG0]] : [[ARG0_TYPE]])
+// CHECK: br ^[[EXIT_BLOCK]](%[[ARG0]] : [[ARG0_TYPE]])
+// CHECK: ^[[EXIT_BLOCK]](%{{.*}}: [[ARG0_TYPE]])
+// CHECK-NEXT: return %[[ARG1]]
+
+// -----
+
+// CHECK-LABEL: func @callee
+func @callee(%arg1: tensor<5xf32>) -> (tensor<5xf32>, memref<2xf32>) {
+ %buff = alloc() : memref<2xf32>
+ return %arg1, %buff : tensor<5xf32>, memref<2xf32>
+}
+// CHECK: (%[[CALLEE_ARG:.*]]: memref<5xf32>) -> (memref<5xf32>, memref<2xf32>)
+// CHECK: %[[ALLOC:.*]] = alloc()
+// CHECK: return %[[CALLEE_ARG]], %[[ALLOC]]
+
+// CHECK-LABEL: func @caller
+func @caller(%arg0: tensor<5xf32>) -> tensor<5xf32> {
+ %x:2 = call @callee(%arg0) : (tensor<5xf32>) -> (tensor<5xf32>, memref<2xf32>)
+ %y:2 = call @callee(%x#0) : (tensor<5xf32>) -> (tensor<5xf32>, memref<2xf32>)
+ return %y#0 : tensor<5xf32>
+}
+// CHECK: (%[[CALLER_ARG:.*]]: memref<5xf32>) -> memref<5xf32>
+// CHECK: %[[X:.*]]:2 = call @callee(%[[CALLER_ARG]])
+// CHECK: %[[Y:.*]]:2 = call @callee(%[[X]]#0)
+// CHECK: return %[[Y]]#0
+
+
+
+
+
diff --git a/mlir/test/Transforms/buffer-placement-preparation.mlir b/mlir/test/Transforms/buffer-placement-preparation.mlir
index 5cde928..cae2829 100644
--- a/mlir/test/Transforms/buffer-placement-preparation.mlir
+++ b/mlir/test/Transforms/buffer-placement-preparation.mlir
@@ -199,7 +199,7 @@ func @compute_allocs_position(%cond: i1, %arg0: tensor<2xf32>) -> tensor<2xf32>{
// -----
// Test case: Checking BufferAssignmentCallOpConverter and
-// FunctionAndBlockSignatureConverter and BufferAssignmentReturnOpConverter all
+// BufferAssignmentFuncOpConverter and BufferAssignmentReturnOpConverter all
// together. The signature of `callee` after signature conversion would be:
// func @callee(%arg0: memref<5xf32>,%arg1: memref<5xf32>) -> ()
@@ -246,7 +246,7 @@ func @caller(%arg0: tensor<5xf32>) -> tensor<5xf32> {
// -----
// Test case: Checking BufferAssignmentCallOpConverter and
-// FunctionAndBlockSignatureConverter and BufferAssignmentReturnOpConverter all
+// BufferAssignmentFuncOpConverter and BufferAssignmentReturnOpConverter all
// together on functions that also have memref typed results. The signature of
// `callee` after signature conversion would be:
diff --git a/mlir/test/lib/Transforms/TestBufferPlacement.cpp b/mlir/test/lib/Transforms/TestBufferPlacement.cpp
index aee12b37..3d0cc29 100644
--- a/mlir/test/lib/Transforms/TestBufferPlacement.cpp
+++ b/mlir/test/lib/Transforms/TestBufferPlacement.cpp
@@ -21,17 +21,22 @@
using namespace mlir;
namespace {
-/// This pass tests the computeAllocPosition helper method and two provided
-/// operation converters, FunctionAndBlockSignatureConverter and
-/// BufferAssignmentReturnOpConverter. Furthermore, this pass converts linalg
-/// operations on tensors to linalg operations on buffers to prepare them for
-/// the BufferPlacement pass that can be applied afterwards.
+/// This pass tests the computeAllocPosition helper method and buffer assignment
+/// operation converters. Furthermore, this pass converts linalg operations on
+/// tensors to linalg operations on buffers to prepare them for the
+/// BufferPlacement pass that can be applied afterwards.
+/// `allowMemrefFunctionResults` informs the buffer placement to allow functions
+/// that have memref typed results. Buffer assignment operation converters will
+/// be adapted respectively. It will also allow memref typed results to escape
+/// from the deallocation.
+template <bool allowMemrefFunctionResults>
struct TestBufferPlacementPreparationPass
- : mlir::PassWrapper<TestBufferPlacementPreparationPass,
- OperationPass<ModuleOp>> {
+ : mlir::PassWrapper<
+ TestBufferPlacementPreparationPass<allowMemrefFunctionResults>,
+ OperationPass<ModuleOp>> {
- /// Converts tensor-type generic linalg operations to memref ones using buffer
- /// assignment.
+ /// Converts tensor-type generic linalg operations to memref ones using
+ /// buffer assignment.
class GenericOpConverter
: public BufferAssignmentOpConversionPattern<linalg::GenericOp> {
public:
@@ -104,19 +109,14 @@ struct TestBufferPlacementPreparationPass
void populateTensorLinalgToBufferLinalgConversionPattern(
MLIRContext *context, BufferAssignmentPlacer *placer,
TypeConverter *converter, OwningRewritePatternList *patterns) {
- // clang-format off
- patterns->insert<
- BufferAssignmentCallOpConverter,
- FunctionAndBlockSignatureConverter,
- GenericOpConverter,
- BufferAssignmentReturnOpConverter<
- ReturnOp, ReturnOp, linalg::CopyOp>
- >(context, placer, converter);
- // clang-format on
+ populateWithBufferAssignmentOpConversionPatterns<
+ mlir::ReturnOp, mlir::ReturnOp, linalg::CopyOp,
+ allowMemrefFunctionResults>(context, placer, converter, patterns);
+ patterns->insert<GenericOpConverter>(context, placer, converter);
}
void runOnOperation() override {
- MLIRContext &context = getContext();
+ MLIRContext &context = this->getContext();
ConversionTarget target(context);
BufferAssignmentTypeConverter converter;
@@ -150,7 +150,7 @@ struct TestBufferPlacementPreparationPass
});
// Walk over all the functions to apply buffer assignment.
- getOperation().walk([&](FuncOp function) -> WalkResult {
+ this->getOperation().walk([&](FuncOp function) -> WalkResult {
OwningRewritePatternList patterns;
BufferAssignmentPlacer placer(function);
populateTensorLinalgToBufferLinalgConversionPattern(
@@ -165,9 +165,18 @@ struct TestBufferPlacementPreparationPass
namespace mlir {
void registerTestBufferPlacementPreparationPass() {
- PassRegistration<TestBufferPlacementPreparationPass>(
+ PassRegistration<
+ TestBufferPlacementPreparationPass</*allowMemrefFunctionResults=*/false>>(
"test-buffer-placement-preparation",
"Tests buffer placement helper methods including its "
"operation-conversion patterns");
}
-} // end namespace mlir \ No newline at end of file
+
+void registerTestPreparationPassWithAllowedMemrefResults() {
+ PassRegistration<
+ TestBufferPlacementPreparationPass</*allowMemrefFunctionResults=*/true>>(
+ "test-buffer-placement-preparation-with-allowed-memref-results",
+ "Tests the helper operation converters of buffer placement for allowing "
+ "functions to have memref typed results.");
+}
+} // end namespace mlir