Diffstat (limited to 'mlir/test/Transforms/buffer-placement-preparation.mlir')
-rw-r--r--  mlir/test/Transforms/buffer-placement-preparation.mlir  |  85
1 file changed, 46 insertions(+), 39 deletions(-)
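
The hunks below migrate the tests from the old linalg.generic syntax, in which args_in/args_out attributes and a trailing "tensor<...> -> tensor<...>" annotation described the operands, to the form that passes inputs through an explicit ins(...) clause; the CHECK lines are updated to expect ins(...)/outs(...) on the bufferized result. A minimal before/after sketch of that change (value names here are illustrative only, not taken from the test):

    // Old form (removed by this diff); operand names are placeholders.
    %res = linalg.generic {args_in = 1 : i64, args_out = 1 : i64,
                           indexing_maps = [#map0, #map0],
                           iterator_types = ["parallel"]} %in {
    ^bb0(%elem: f32):
      %e = exp %elem : f32
      linalg.yield %e : f32
    }: tensor<5xf32> -> tensor<5xf32>

    // New form (added by this diff): the input is named in ins(...) and
    // only the result type follows the region.
    %res = linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
        ins(%in : tensor<5xf32>) {
    ^bb0(%elem: f32):
      %e = exp %elem : f32
      linalg.yield %e : f32
    } -> tensor<5xf32>
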
diff --git a/mlir/test/Transforms/buffer-placement-preparation.mlir b/mlir/test/Transforms/buffer-placement-preparation.mlir
index b1cfdfd..4fcd225 100644
--- a/mlir/test/Transforms/buffer-placement-preparation.mlir
+++ b/mlir/test/Transforms/buffer-placement-preparation.mlir
@@ -17,11 +17,12 @@ func @func_signature_conversion(%arg0: tensor<4x8xf32>) {
// CHECK-LABEL: func @memref_in_function_results
func @memref_in_function_results(%arg0: tensor<5xf32>, %arg1: memref<10xf32>) -> (tensor<5xf32>, memref<10xf32>, memref<15xf32>) {
%0 = alloc() : memref<15xf32>
- %1 = linalg.generic {args_in = 1 : i64, args_out = 1 : i64, indexing_maps = [#map0, #map0], iterator_types = ["parallel"]} %arg0 {
+ %1 = linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
+ ins(%arg0 : tensor<5xf32>) {
^bb0(%gen1_arg0: f32):
%tmp1 = exp %gen1_arg0 : f32
linalg.yield %tmp1 : f32
- }: tensor<5xf32> -> tensor<5xf32>
+ } -> tensor<5xf32>
return %1, %arg1, %0 : tensor<5xf32>, memref<10xf32>, memref<15xf32>
}
// CHECK: (%[[ARG0:.*]]: memref<5xf32>, %[[ARG1:.*]]: memref<10xf32>, %[[RESULT:.*]]: memref<5xf32>)
@@ -97,23 +98,25 @@ func @func_and_block_signature_conversion(%arg0 : tensor<2xf32>, %cond : i1, %ar
// CHECK-LABEL: func @compute_allocs_position_simple
func @compute_allocs_position_simple(%cond: i1, %arg0: tensor<2xf32>) -> tensor<2xf32>{
- %0 = linalg.generic {args_in = 1 : i64, args_out = 1 : i64, indexing_maps = [#map0, #map0], iterator_types = ["parallel"]} %arg0 {
+ %0 = linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
+ ins(%arg0 : tensor<2xf32>) {
^bb0(%gen1_arg0: f32):
%tmp1 = exp %gen1_arg0 : f32
linalg.yield %tmp1 : f32
- }: tensor<2xf32> -> tensor<2xf32>
- %1 = linalg.generic {args_in = 1 : i64, args_out = 1 : i64, indexing_maps = [#map0, #map0], iterator_types = ["parallel"]} %0 {
+ } -> tensor<2xf32>
+ %1 = linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
+ ins(%0 : tensor<2xf32>) {
^bb0(%gen2_arg0: f32):
%tmp2 = exp %gen2_arg0 : f32
linalg.yield %tmp2 : f32
- }: tensor<2xf32> -> tensor<2xf32>
+ } -> tensor<2xf32>
return %1 : tensor<2xf32>
}
// CHECK: (%{{.*}}: {{.*}}, %[[ARG0:.*]]: memref<2xf32>,
// CHECK-NEXT: %[[FIRST_ALLOC:.*]] = alloc()
-// CHECK-NEXT: linalg.generic {{.*}} %[[ARG0]], %[[FIRST_ALLOC]]
+// CHECK-NEXT: linalg.generic {{.*}} ins(%[[ARG0]]{{.*}} outs(%[[FIRST_ALLOC]]
// CHECK: %[[SECOND_ALLOC:.*]] = alloc()
-// CHECK-NEXT: linalg.generic {{.*}} %[[FIRST_ALLOC]], %[[SECOND_ALLOC]]
+// CHECK-NEXT: linalg.generic {{.*}} ins(%[[FIRST_ALLOC]]{{.*}} outs(%[[SECOND_ALLOC]]
// -----
@@ -123,78 +126,86 @@ func @compute_allocs_position_simple(%cond: i1, %arg0: tensor<2xf32>) -> tensor<
// CHECK-LABEL: func @compute_allocs_position
func @compute_allocs_position(%cond: i1, %arg0: tensor<2xf32>) -> tensor<2xf32>{
- %0 = linalg.generic {args_in = 1 : i64, args_out = 1 : i64, indexing_maps = [#map0, #map0], iterator_types = ["parallel"]} %arg0 {
+ %0 = linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
+ ins(%arg0 : tensor<2xf32>) {
^bb0(%gen1_arg0: f32):
%tmp1 = exp %gen1_arg0 : f32
linalg.yield %tmp1 : f32
- }: tensor<2xf32> -> tensor<2xf32>
- %1 = linalg.generic {args_in = 1 : i64, args_out = 1 : i64, indexing_maps = [#map0, #map0], iterator_types = ["parallel"]} %0 {
+ } -> tensor<2xf32>
+ %1 = linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
+ ins(%0 : tensor<2xf32>) {
^bb0(%gen2_arg0: f32):
%tmp2 = exp %gen2_arg0 : f32
linalg.yield %tmp2 : f32
- }: tensor<2xf32> -> tensor<2xf32>
+ } -> tensor<2xf32>
cond_br %cond, ^bb1(%arg0, %0: tensor<2xf32>, tensor<2xf32>),
^bb2(%0, %arg0: tensor<2xf32>, tensor<2xf32>)
^bb1(%arg1 : tensor<2xf32>, %arg2 : tensor<2xf32>):
- %2 = linalg.generic {args_in = 1 : i64, args_out = 1 : i64, indexing_maps = [#map0, #map0], iterator_types = ["parallel"]} %arg0 {
+ %2 = linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
+ ins(%arg0 : tensor<2xf32>) {
^bb0(%gen3_arg0: f32):
%tmp3 = exp %gen3_arg0 : f32
linalg.yield %tmp3 : f32
- }: tensor<2xf32> -> tensor<2xf32>
- %3 = linalg.generic {args_in = 1 : i64, args_out = 1 : i64, indexing_maps = [#map0, #map0], iterator_types = ["parallel"]} %2 {
+ } -> tensor<2xf32>
+ %3 = linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
+ ins(%2 : tensor<2xf32>) {
^bb0(%gen4_arg0: f32):
%tmp4 = exp %gen4_arg0 : f32
linalg.yield %tmp4 : f32
- }: tensor<2xf32> -> tensor<2xf32>
+ } -> tensor<2xf32>
br ^exit(%arg1, %arg2 : tensor<2xf32>, tensor<2xf32>)
^bb2(%arg3 : tensor<2xf32>, %arg4 : tensor<2xf32>):
- %4 = linalg.generic {args_in = 1 : i64, args_out = 1 : i64, indexing_maps = [#map0, #map0], iterator_types = ["parallel"]} %arg0 {
+ %4 = linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
+ ins(%arg0 : tensor<2xf32>) {
^bb0(%gen5_arg0: f32):
%tmp5 = exp %gen5_arg0 : f32
linalg.yield %tmp5 : f32
- }: tensor<2xf32> -> tensor<2xf32>
- %5 = linalg.generic {args_in = 1 : i64, args_out = 1 : i64, indexing_maps = [#map0, #map0], iterator_types = ["parallel"]} %4 {
+ } -> tensor<2xf32>
+ %5 = linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
+ ins(%4 : tensor<2xf32>) {
^bb0(%gen6_arg0: f32):
%tmp6 = exp %gen6_arg0 : f32
linalg.yield %tmp6 : f32
- }: tensor<2xf32> -> tensor<2xf32>
+ } -> tensor<2xf32>
br ^exit(%arg3, %arg4 : tensor<2xf32>, tensor<2xf32>)
^exit(%arg5 : tensor<2xf32>, %arg6 : tensor<2xf32>):
- %6 = linalg.generic {args_in = 1 : i64, args_out = 1 : i64, indexing_maps = [#map0, #map0], iterator_types = ["parallel"]} %arg0 {
+ %6 = linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
+ ins(%arg0 : tensor<2xf32>) {
^bb0(%gen7_arg0: f32):
%tmp7 = exp %gen7_arg0 : f32
linalg.yield %tmp7 : f32
- }: tensor<2xf32> -> tensor<2xf32>
- %7 = linalg.generic {args_in = 1 : i64, args_out = 1 : i64, indexing_maps = [#map0, #map0], iterator_types = ["parallel"]} %6 {
+ } -> tensor<2xf32>
+ %7 = linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
+ ins(%6 : tensor<2xf32>) {
^bb0(%gen8_arg0: f32):
%tmp8 = exp %gen8_arg0 : f32
linalg.yield %tmp8 : f32
- }: tensor<2xf32> -> tensor<2xf32>
+ } -> tensor<2xf32>
return %7 : tensor<2xf32>
}
// CHECK: (%{{.*}}: {{.*}}, %[[ARG0:.*]]: memref<2xf32>,
// CHECK-NEXT: %[[ALLOC0:.*]] = alloc()
-// CHECK-NEXT: linalg.generic {{.*}} %[[ARG0]], %[[ALLOC0]]
+// CHECK-NEXT: linalg.generic {{.*}} ins(%[[ARG0]]{{.*}} outs(%[[ALLOC0]]
// CHECK: %[[ALLOC1:.*]] = alloc()
-// CHECK-NEXT: linalg.generic {{.*}} %[[ALLOC0]], %[[ALLOC1]]
+// CHECK-NEXT: linalg.generic {{.*}} ins(%[[ALLOC0]]{{.*}} outs(%[[ALLOC1]]
// CHECK: cond_br %{{.*}}, ^[[BB0:.*]]({{.*}}), ^[[BB1:.*]](
// CHECK-NEXT: ^[[BB0]]
// CHECK-NEXT: %[[ALLOC2:.*]] = alloc()
-// CHECK-NEXT: linalg.generic {{.*}} %[[ARG0]], %[[ALLOC2]]
+// CHECK-NEXT: linalg.generic {{.*}} ins(%[[ARG0]]{{.*}} outs(%[[ALLOC2]]
// CHECK: %[[ALLOC3:.*]] = alloc()
-// CHECK-NEXT: linalg.generic {{.*}} %[[ALLOC2]], %[[ALLOC3]]
+// CHECK-NEXT: linalg.generic {{.*}} ins(%[[ALLOC2]]{{.*}} outs(%[[ALLOC3]]
// CHECK: br ^[[EXIT:.*]]({{.*}})
// CHECK-NEXT: ^[[BB1]]
// CHECK-NEXT: %[[ALLOC4:.*]] = alloc()
-// CHECK-NEXT: linalg.generic {{.*}} %[[ARG0]], %[[ALLOC4]]
+// CHECK-NEXT: linalg.generic {{.*}} ins(%[[ARG0]]{{.*}} outs(%[[ALLOC4]]
// CHECK: %[[ALLOC5:.*]] = alloc()
-// CHECK-NEXT: linalg.generic {{.*}} %[[ALLOC4]], %[[ALLOC5]]
+// CHECK-NEXT: linalg.generic {{.*}} ins(%[[ALLOC4]]{{.*}} outs(%[[ALLOC5]]
// CHECK: br ^[[EXIT]]
// CHECK-NEXT: ^[[EXIT]]
// CHECK-NEXT: %[[ALLOC6:.*]] = alloc()
-// CHECK-NEXT: linalg.generic {{.*}} %[[ARG0]], %[[ALLOC6]]
+// CHECK-NEXT: linalg.generic {{.*}} ins(%[[ARG0]]{{.*}} outs(%[[ALLOC6]]
// CHECK: %[[ALLOC7:.*]] = alloc()
-// CHECK-NEXT: linalg.generic {{.*}} %[[ALLOC6]], %[[ALLOC7]]
+// CHECK-NEXT: linalg.generic {{.*}} ins(%[[ALLOC6]]{{.*}} outs(%[[ALLOC7]]
// -----
@@ -211,16 +222,12 @@ func @compute_allocs_position(%cond: i1, %arg0: tensor<2xf32>) -> tensor<2xf32>{
// CHECK-LABEL: func @callee
func @callee(%arg1: tensor<5xf32>) -> tensor<5xf32> {
- %0 = linalg.generic {
- args_in = 1 : i64,
- args_out = 1 : i64,
- indexing_maps = [#map0, #map0],
- iterator_types = ["parallel"]
- } %arg1 {
+ %0 = linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel"]}
+ ins(%arg1 : tensor<5xf32>) {
^bb0(%gen1_arg0: f32):
%tmp1 = exp %gen1_arg0 : f32
linalg.yield %tmp1 : f32
- }: tensor<5xf32> -> tensor<5xf32>
+ } -> tensor<5xf32>
return %0 : tensor<5xf32>
}
// CHECK: (%[[CALLEE_ARG:.*]]: memref<5xf32>, %[[CALLEE_RESULT:.*]]: memref<5xf32>)