author     Frederik Gossen <frgossen@google.com>    2020-06-10 13:52:43 +0000
committer  Frederik Gossen <frgossen@google.com>    2020-06-10 13:54:47 +0000
commit     904f91db5fcd74f493811df0787a1ddea651d03c (patch)
tree       f6676814818e962fbce4cf1a55a862695ddd09b3 /mlir/test/Transforms
parent     756db3084bc188fe6c9ed90992880a95a049c63f (diff)
[MLIR][Standard] Make the `dim` operation index an operand.
Allow for dynamic indices in the `dim` operation. Rather than an attribute, the index is now an operand of type `index`. This makes it possible to apply the operation to dynamically ranked tensors. The correct lowering of dynamic indices remains to be implemented.

Differential Revision: https://reviews.llvm.org/D81551
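To illustrate the change in form, here is a minimal sketch based on the updated tests below (the function name `@dim_example` is hypothetical):

    // Before this change: the dimension was an integer attribute.
    //   %0 = dim %arg0, 1 : tensor<8x4xf32>
    // After this change: the dimension is an operand of type `index`.
    func @dim_example(%arg0: tensor<8x4xf32>) -> index {
      %c1 = constant 1 : index
      %0 = dim %arg0, %c1 : tensor<8x4xf32>  // folds to `constant 4 : index`
      return %0 : index
    }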
Diffstat (limited to 'mlir/test/Transforms')
-rw-r--r--  mlir/test/Transforms/canonicalize.mlir            | 33
-rw-r--r--  mlir/test/Transforms/constant-fold.mlir           |  3
-rw-r--r--  mlir/test/Transforms/pipeline-data-transfer.mlir  |  6
3 files changed, 24 insertions, 18 deletions
diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir
index f1ad305..dc5c9a7 100644
--- a/mlir/test/Transforms/canonicalize.mlir
+++ b/mlir/test/Transforms/canonicalize.mlir
@@ -28,7 +28,8 @@ func @test_subi_zero_tensor(%arg0: tensor<4x5xi32>) -> tensor<4x5xi32> {
func @dim(%arg0: tensor<8x4xf32>) -> index {
// CHECK: %c4 = constant 4 : index
- %0 = dim %arg0, 1 : tensor<8x4xf32>
+ %c1 = constant 1 : index
+ %0 = dim %arg0, %c1 : tensor<8x4xf32>
// CHECK-NEXT: return %c4
return %0 : index
@@ -51,7 +52,8 @@ func @test_commutative(%arg0: i32) -> (i32, i32) {
// CHECK-LABEL: func @trivial_dce
func @trivial_dce(%arg0: tensor<8x4xf32>) {
- %0 = dim %arg0, 1 : tensor<8x4xf32>
+ %c1 = constant 1 : index
+ %0 = dim %arg0, %c1 : tensor<8x4xf32>
// CHECK-NEXT: return
return
}
@@ -314,7 +316,7 @@ func @memref_cast_folding(%arg0: memref<4 x f32>, %arg1: f32) -> (f32, f32) {
%0 = memref_cast %arg0 : memref<4xf32> to memref<?xf32>
// CHECK-NEXT: %c0 = constant 0 : index
%c0 = constant 0 : index
- %dim = dim %0, 0 : memref<? x f32>
+ %dim = dim %0, %c0 : memref<? x f32>
// CHECK-NEXT: affine.load %arg0[3]
%1 = affine.load %0[%dim - 1] : memref<?xf32>
@@ -442,24 +444,25 @@ func @dim_op_fold(%arg0: index, %arg1: index, %arg2: index, %BUF: memref<?xi8>,
// CHECK-SAME: [[K:arg[0-9]+]]: index
%c0 = constant 0 : index
%c1 = constant 1 : index
+ %c2 = constant 2 : index
%0 = alloc(%arg0, %arg1) : memref<?x?xf32>
%1 = alloc(%arg1, %arg2) : memref<?x8x?xf32>
- %2 = dim %1, 2 : memref<?x8x?xf32>
+ %2 = dim %1, %c2 : memref<?x8x?xf32>
affine.for %arg3 = 0 to %2 {
%3 = alloc(%arg0) : memref<?xi8>
- %ub = dim %3, 0 : memref<?xi8>
+ %ub = dim %3, %c0 : memref<?xi8>
affine.for %arg4 = 0 to %ub {
- %s = dim %0, 0 : memref<?x?xf32>
+ %s = dim %0, %c0 : memref<?x?xf32>
%v = std.view %3[%c0][%arg4, %s] : memref<?xi8> to memref<?x?xf32>
%sv = subview %0[%c0, %c0][%s,%arg4][%c1,%c1] : memref<?x?xf32> to memref<?x?xf32, #map1>
- %l = dim %v, 1 : memref<?x?xf32>
- %u = dim %sv, 0 : memref<?x?xf32, #map1>
+ %l = dim %v, %c1 : memref<?x?xf32>
+ %u = dim %sv, %c0 : memref<?x?xf32, #map1>
affine.for %arg5 = %l to %u {
"foo"() : () -> ()
}
%sv2 = subview %0[0, 0][17, %arg4][1, 1] : memref<?x?xf32> to memref<17x?xf32, #map3>
- %l2 = dim %v, 1 : memref<?x?xf32>
- %u2 = dim %sv2, 1 : memref<17x?xf32, #map3>
+ %l2 = dim %v, %c1 : memref<?x?xf32>
+ %u2 = dim %sv2, %c1 : memref<17x?xf32, #map3>
scf.for %arg5 = %l2 to %u2 step %c1 {
"foo"() : () -> ()
}
@@ -480,9 +483,9 @@ func @dim_op_fold(%arg0: index, %arg1: index, %arg2: index, %BUF: memref<?xi8>,
%B = view %BUF[%c0][%K, %N] : memref<?xi8> to memref<?x?xf32>
%C = view %BUF[%c0][%M, %N] : memref<?xi8> to memref<?x?xf32>
- %M_ = dim %A, 0 : memref<?x?xf32>
- %K_ = dim %A, 1 : memref<?x?xf32>
- %N_ = dim %C, 1 : memref<?x?xf32>
+ %M_ = dim %A, %c0 : memref<?x?xf32>
+ %K_ = dim %A, %c1 : memref<?x?xf32>
+ %N_ = dim %C, %c1 : memref<?x?xf32>
scf.for %i = %c0 to %M_ step %c1 {
scf.for %j = %c0 to %N_ step %c1 {
scf.for %k = %c0 to %K_ step %c1 {
@@ -855,8 +858,8 @@ func @subview(%arg0 : index, %arg1 : index) -> (index, index) {
store %v0, %20[%arg1, %arg1] : memref<12x4xf32, offset: ?, strides:[4, 1]>
// Test: dim on subview is rewritten to size operand.
- %7 = dim %4, 0 : memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>
- %8 = dim %4, 1 : memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>
+ %7 = dim %4, %c0 : memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>
+ %8 = dim %4, %c1 : memref<?x?x?xf32, offset : ?, strides : [?, ?, ?]>
// CHECK: return %[[C7]], %[[C11]]
return %7, %8 : index, index
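The CHECK line above relies on `dim` of a `subview` result being rewritten to the corresponding size operand. A hedged sketch of that fold with the operand-based `dim` (illustrative values; assumes %c0, %c1, %src, and a strided layout #map are defined as in the test):

    %c7 = constant 7 : index
    %c11 = constant 11 : index
    %sv = subview %src[%c0, %c0][%c7, %c11][%c1, %c1]
        : memref<?x?xf32> to memref<?x?xf32, #map>
    // dim on a subview canonicalizes to the matching size operand:
    %d0 = dim %sv, %c0 : memref<?x?xf32, #map>   // rewritten to %c7
    %d1 = dim %sv, %c1 : memref<?x?xf32, #map>   // rewritten to %c11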
diff --git a/mlir/test/Transforms/constant-fold.mlir b/mlir/test/Transforms/constant-fold.mlir
index 801d591..5562450 100644
--- a/mlir/test/Transforms/constant-fold.mlir
+++ b/mlir/test/Transforms/constant-fold.mlir
@@ -382,7 +382,8 @@ func @muli_splat_vector() -> vector<4xi32> {
func @dim(%x : tensor<8x4xf32>) -> index {
// CHECK:[[C4:%.+]] = constant 4 : index
- %0 = dim %x, 1 : tensor<8x4xf32>
+ %c1 = constant 1 : index
+ %0 = dim %x, %c1 : tensor<8x4xf32>
// CHECK-NEXT: return [[C4]]
return %0 : index
diff --git a/mlir/test/Transforms/pipeline-data-transfer.mlir b/mlir/test/Transforms/pipeline-data-transfer.mlir
index 3c93cc8..aab4f4a 100644
--- a/mlir/test/Transforms/pipeline-data-transfer.mlir
+++ b/mlir/test/Transforms/pipeline-data-transfer.mlir
@@ -330,8 +330,10 @@ func @dynamic_shape_dma_buffer(%arg0: memref<512 x 32 x f32>) {
// Double buffering for dynamic shaped buffer.
// CHECK: alloc(%{{.*}}, %{{.*}}) : memref<?x?xf32, 2>
-// CHECK-NEXT: dim %{{.*}}, 0 : memref<?x?xf32, 2>
-// CHECK-NEXT: dim %{{.*}}, 1 : memref<?x?xf32, 2>
+// CHECK-NEXT: %[[C0:.*]] = constant 0 : index
+// CHECK-NEXT: dim %{{.*}}, %[[C0]] : memref<?x?xf32, 2>
+// CHECK-NEXT: %[[C1:.*]] = constant 1 : index
+// CHECK-NEXT: dim %{{.*}}, %[[C1]] : memref<?x?xf32, 2>
// CHECK-NEXT: alloc(%{{.*}}, %{{.*}}) : memref<2x?x?xf32, 2>
// CHECK: affine.dma_start %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}}[%{{.*}} mod 2, 0, 0], %{{.*}}[%{{.*}} mod 2, 0], %{{.*}}
affine.for %kTT = 0 to 16 {