Diffstat (limited to 'mlir/test')
-rw-r--r--  mlir/test/Dialect/Tosa/invalid.mlir                |  55
-rw-r--r--  mlir/test/Dialect/Tosa/invalid_extension.mlir      |  22
-rw-r--r--  mlir/test/Dialect/Tosa/level_check.mlir            |  30
-rw-r--r--  mlir/test/Dialect/Tosa/variables.mlir              | 132
-rw-r--r--  mlir/test/Dialect/Tosa/verifier.mlir               |  56
-rw-r--r--  mlir/test/Dialect/Transform/ops-invalid.mlir       |  65
-rw-r--r--  mlir/test/lib/Dialect/Test/TestInterfaces.td       |  10
-rw-r--r--  mlir/test/lib/Dialect/Test/TestTypes.cpp           |   4
-rw-r--r--  mlir/test/lib/IR/TestInterfaces.cpp                |   2
-rw-r--r--  mlir/test/mlir-tblgen/interfaces.mlir              |   2
-rw-r--r--  mlir/test/python/dialects/python_test.py           |  31
-rw-r--r--  mlir/test/python/lib/CMakeLists.txt                |   1
-rw-r--r--  mlir/test/python/lib/PythonTestModulePybind11.cpp  | 118
13 files changed, 254 insertions, 274 deletions
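Note on the TOSA hunks below: the updated tests move tosa.variable declarations out of func.func bodies and up to module scope, while tosa.variable_read / tosa.variable_write stay inside the functions. A condensed sketch of the resulting test shape, mirroring variables.mlir in this patch rather than copying any single test verbatim:

    module {
      // The variable declaration now sits directly in the module's symbol table.
      tosa.variable @stored_var = dense<-1> : tensor<2x4x8xi32>

      func.func @test_variable_tensor(%arg0: tensor<2x4x8xi32>) -> () {
        // Reads and writes of the variable remain inside the function body.
        %0 = tosa.variable_read @stored_var : tensor<2x4x8xi32>
        %1 = tosa.add %arg0, %0 : (tensor<2x4x8xi32>, tensor<2x4x8xi32>) -> tensor<2x4x8xi32>
        tosa.variable_write @stored_var, %1 : tensor<2x4x8xi32>
        return
      }
    }

A side effect of this move is that duplicate declarations are now rejected by the symbol table ("redefinition of symbol named 'stored_var'") rather than by the op-specific multiple-declaration check, as the verifier.mlir hunk shows.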
diff --git a/mlir/test/Dialect/Tosa/invalid.mlir b/mlir/test/Dialect/Tosa/invalid.mlir index 41c3243..e60f1c9b 100644 --- a/mlir/test/Dialect/Tosa/invalid.mlir +++ b/mlir/test/Dialect/Tosa/invalid.mlir @@ -573,64 +573,61 @@ func.func @test_avg_pool2d_zero_dim_input(%arg0: tensor<1x0x?x9xf32>, %arg1: ten // ----- -func.func @test_variable_unranked(%arg0: tensor<2x4x8xi8>) -> () { +module { tosa.variable @stored_var : tensor<*xi8> // expected-error@+1 {{custom op 'tosa.variable' expected ranked type}} - return } // ----- -func.func @test_variable_unranked_initial_value(%arg0: tensor<2x4x8xi8>) -> () { +module { // expected-error@+1 {{elements literal type must have static shape}} tosa.variable @stored_var = dense<0> : tensor<*xi8> // expected-error@+1 {{custom op 'tosa.variable' expected attribute}} - return -} - -// ----- - -func.func @test_variable_duplicates(%arg0: tensor<2x4x8xi8>) -> () { - tosa.variable @stored_var = dense<-1> : tensor<2x4x8xi8> - // expected-error@+1 {{'tosa.variable' op illegal to have multiple declaration of 'stored_var'}} - tosa.variable @stored_var = dense<3> : tensor<1x4x8xi8> - return } // ----- -func.func @test_variable_read_type(%arg0: tensor<2x4x8xi8>) -> () { +module { tosa.variable @stored_var = dense<-1> : tensor<2x4x8xi8> - // expected-error@+1 {{'tosa.variable_read' op require same element type for 'output1' ('i16') and the input tensor ('i8')}} - %0 = tosa.variable_read @stored_var : tensor<2x4x8xi16> - return + func.func @test_variable_read_type(%arg0: tensor<2x4x8xi8>) -> () { + // expected-error@+1 {{'tosa.variable_read' op require same element type for 'output1' ('i16') and the input tensor ('i8')}} + %0 = tosa.variable_read @stored_var : tensor<2x4x8xi16> + return + } } // ----- -func.func @test_variable_read_shape(%arg0: tensor<2x4x8xi8>) -> () { +module { tosa.variable @stored_var = dense<-1> : tensor<2x4x8xi8> - // expected-error@+1 {{'tosa.variable_read' op require same element type for 'output1' ('i32') and the input tensor ('i8'}} - %0 = tosa.variable_read @stored_var : tensor<1x4x8xi32> - return + func.func @test_variable_read_shape(%arg0: tensor<2x4x8xi8>) -> () { + // expected-error@+1 {{'tosa.variable_read' op require same element type for 'output1' ('i32') and the input tensor ('i8'}} + %0 = tosa.variable_read @stored_var : tensor<1x4x8xi32> + return + } } // ----- -func.func @test_variable_write_type(%arg0: tensor<2x4x8xi16>) -> () { +module { tosa.variable @stored_var = dense<-1> : tensor<2x4x8xi8> - // expected-error@+1 {{'tosa.variable_write' op require same element type for 'input1' ('i16') and the input tensor ('i8')}} - tosa.variable_write @stored_var, %arg0 : tensor<2x4x8xi16> - return + func.func @test_variable_write_type(%arg0: tensor<2x4x8xi16>) -> () { + // expected-error@+1 {{'tosa.variable_write' op require same element type for 'input1' ('i16') and the input tensor ('i8')}} + tosa.variable_write @stored_var, %arg0 : tensor<2x4x8xi16> + return + } } // ----- -func.func @test_variable_write_shape(%arg0: tensor<1x4x8xi8>) -> () { +module { tosa.variable @stored_var = dense<-1> : tensor<2x4x8xi8> - // expected-error@+1 {{'tosa.variable_write' op require same shapes for 'input1' ('tensor<1x4x8xi8>') and the input tensor ('tensor<2x4x8xi8>')}} - tosa.variable_write @stored_var, %arg0 : tensor<1x4x8xi8> - return + func.func @test_variable_write_shape(%arg0: tensor<1x4x8xi8>) -> () { + // expected-error@+1 {{'tosa.variable_write' op require same shapes for 'input1' ('tensor<1x4x8xi8>') and the input tensor ('tensor<2x4x8xi8>')}} + 
tosa.variable_write @stored_var, %arg0 : tensor<1x4x8xi8> + return + } } // ----- diff --git a/mlir/test/Dialect/Tosa/invalid_extension.mlir b/mlir/test/Dialect/Tosa/invalid_extension.mlir index 3138ce2..1daabe9 100644 --- a/mlir/test/Dialect/Tosa/invalid_extension.mlir +++ b/mlir/test/Dialect/Tosa/invalid_extension.mlir @@ -310,21 +310,27 @@ func.func @test_identity(%arg0: tensor<13x21x3xi4>) -> tensor<13x21x3xi4> { } // ----- -func.func @test_variable_read_type(%arg0: tensor<2x4x8xi8>) -> () { +module { // expected-error@+1 {{'tosa.variable' op illegal: requires [variable] but not enabled in target}} tosa.variable @stored_var = dense<-1> : tensor<2x4x8xi8> - // expected-error@+1 {{'tosa.variable_read' op illegal: requires [variable]}} - %0 = tosa.variable_read @stored_var : tensor<2x4x8xi8> - return + + func.func @test_variable_read_type(%arg0: tensor<2x4x8xi8>) -> () { + // expected-error@+1 {{'tosa.variable_read' op illegal: requires [variable]}} + %0 = tosa.variable_read @stored_var : tensor<2x4x8xi8> + return + } } // ----- -func.func @test_variable_write_type(%arg0: tensor<2x4x8xi8>) -> () { +module { // expected-error@+1 {{'tosa.variable' op illegal: requires [variable] but not enabled in target}} tosa.variable @stored_var = dense<-1> : tensor<2x4x8xi8> - // expected-error@+1 {{'tosa.variable_write' op illegal: requires [variable]}} - tosa.variable_write @stored_var, %arg0 : tensor<2x4x8xi8> - return + + func.func @test_variable_write_type(%arg0: tensor<2x4x8xi8>) -> () { + // expected-error@+1 {{'tosa.variable_write' op illegal: requires [variable]}} + tosa.variable_write @stored_var, %arg0 : tensor<2x4x8xi8> + return + } } // ----- diff --git a/mlir/test/Dialect/Tosa/level_check.mlir b/mlir/test/Dialect/Tosa/level_check.mlir index 3742adf..5bf2dbb8 100644 --- a/mlir/test/Dialect/Tosa/level_check.mlir +++ b/mlir/test/Dialect/Tosa/level_check.mlir @@ -1097,14 +1097,17 @@ func.func @test_scatter_tensor_size_invalid(%arg0: tensor<13x260000000x3xf32>, % // ----- -func.func @test_variable_read_write_tensor_size_invalid() -> () { +module { // expected-error@+1 {{'tosa.variable' op failed level check: variable type tensor size (in bytes) <= (1 << MAX_LOG2_SIZE - 1)}} tosa.variable @stored_var : tensor<536870912xf32> - // expected-error@+1 {{'tosa.variable_read' op failed level check: result tensor size (in bytes) <= (1 << MAX_LOG2_SIZE - 1)}} - %0 = tosa.variable_read @stored_var : tensor<536870912xf32> - // expected-error@+1 {{'tosa.variable_write' op failed level check: operand tensor size (in bytes) <= (1 << MAX_LOG2_SIZE - 1)}} - tosa.variable_write @stored_var, %0 : tensor<536870912xf32> - return + + func.func @test_variable_read_write_tensor_size_invalid() -> () { + // expected-error@+1 {{'tosa.variable_read' op failed level check: result tensor size (in bytes) <= (1 << MAX_LOG2_SIZE - 1)}} + %0 = tosa.variable_read @stored_var : tensor<536870912xf32> + // expected-error@+1 {{'tosa.variable_write' op failed level check: operand tensor size (in bytes) <= (1 << MAX_LOG2_SIZE - 1)}} + tosa.variable_write @stored_var, %0 : tensor<536870912xf32> + return + } } // ----- @@ -1165,14 +1168,17 @@ func.func @test_cond_if_rank_invalid(%arg0: tensor<1x1x1x1x1x1x1x1xf32>, %arg1: // ----- -func.func @test_variable_read_write_rank_invalid() -> () { +module { // expected-error@+1 {{'tosa.variable' op failed level check: variable type rank(shape) <= MAX_RANK}} tosa.variable @stored_var : tensor<1x1x1x1x1x1x1x1xf32> - // expected-error@+1 {{'tosa.variable_read' op failed level check: result 
rank(shape) <= MAX_RANK}} - %0 = tosa.variable_read @stored_var : tensor<1x1x1x1x1x1x1x1xf32> - // expected-error@+1 {{'tosa.variable_write' op failed level check: operand rank(shape) <= MAX_RANK}} - tosa.variable_write @stored_var, %0 : tensor<1x1x1x1x1x1x1x1xf32> - return + + func.func @test_variable_read_write_rank_invalid() -> () { + // expected-error@+1 {{'tosa.variable_read' op failed level check: result rank(shape) <= MAX_RANK}} + %0 = tosa.variable_read @stored_var : tensor<1x1x1x1x1x1x1x1xf32> + // expected-error@+1 {{'tosa.variable_write' op failed level check: operand rank(shape) <= MAX_RANK}} + tosa.variable_write @stored_var, %0 : tensor<1x1x1x1x1x1x1x1xf32> + return + } } // ----- diff --git a/mlir/test/Dialect/Tosa/variables.mlir b/mlir/test/Dialect/Tosa/variables.mlir index 9953eb3..0c104e8 100644 --- a/mlir/test/Dialect/Tosa/variables.mlir +++ b/mlir/test/Dialect/Tosa/variables.mlir @@ -3,76 +3,98 @@ // ----- -// CHECK-LABEL: @test_variable_scalar( -// CHECK-SAME: %[[ADD_VAL:.*]]: tensor<f32>) { -func.func @test_variable_scalar(%arg0: tensor<f32>) -> () { - // CHECK: tosa.variable @stored_var = dense<3.140000e+00> : tensor<f32> + +module { + // CHECK: tosa.variable @stored_var = dense<3.140000e+00> : tensor<f32> tosa.variable @stored_var = dense<3.14> : tensor<f32> - // CHECK: %[[STORED_VAL:.*]] = tosa.variable_read @stored_var : tensor<f32> - %0 = tosa.variable_read @stored_var : tensor<f32> - // CHECK: %[[RESULT_ADD:.*]] = tosa.add %[[ADD_VAL]], %[[STORED_VAL]] : (tensor<f32>, tensor<f32>) -> tensor<f32> - %1 = "tosa.add"(%arg0, %0) : (tensor<f32>, tensor<f32>) -> tensor<f32> - // CHECK: tosa.variable_write @stored_var, %[[RESULT_ADD]] : tensor<f32> - tosa.variable_write @stored_var, %1 : tensor<f32> - return + + // CHECK-LABEL: @test_variable_scalar( + // CHECK-SAME: %[[ADD_VAL:.*]]: tensor<f32>) { + func.func @test_variable_scalar(%arg0: tensor<f32>) -> () { + // CHECK: %[[STORED_VAL:.*]] = tosa.variable_read @stored_var : tensor<f32> + %0 = tosa.variable_read @stored_var : tensor<f32> + // CHECK: %[[RESULT_ADD:.*]] = tosa.add %[[ADD_VAL]], %[[STORED_VAL]] : (tensor<f32>, tensor<f32>) -> tensor<f32> + %1 = "tosa.add"(%arg0, %0) : (tensor<f32>, tensor<f32>) -> tensor<f32> + // CHECK: tosa.variable_write @stored_var, %[[RESULT_ADD]] : tensor<f32> + tosa.variable_write @stored_var, %1 : tensor<f32> + return + } } + // ----- -// CHECK-LABEL: @test_variable_tensor( -// CHECK-SAME: %[[ADD_VAL:.*]]: tensor<2x4x8xi32>) { -func.func @test_variable_tensor(%arg0: tensor<2x4x8xi32>) -> () { - // CHECK: tosa.variable @stored_var = dense<-1> : tensor<2x4x8xi32> + +module { + // CHECK: tosa.variable @stored_var = dense<-1> : tensor<2x4x8xi32> tosa.variable @stored_var = dense<-1> : tensor<2x4x8xi32> - // CHECK: %[[STORED_VAL:.*]] = tosa.variable_read @stored_var : tensor<2x4x8xi32> - %0 = tosa.variable_read @stored_var : tensor<2x4x8xi32> - // CHECK: %[[RESULT_ADD:.*]] = tosa.add %[[ADD_VAL]], %[[STORED_VAL]] : (tensor<2x4x8xi32>, tensor<2x4x8xi32>) -> tensor<2x4x8xi32> - %1 = "tosa.add"(%arg0, %0) : (tensor<2x4x8xi32>, tensor<2x4x8xi32>) -> tensor<2x4x8xi32> - // CHECK: tosa.variable_write @stored_var, %[[RESULT_ADD]] : tensor<2x4x8xi32> - tosa.variable_write @stored_var, %1 : tensor<2x4x8xi32> - return + + // CHECK-LABEL: @test_variable_tensor( + // CHECK-SAME: %[[ADD_VAL:.*]]: tensor<2x4x8xi32>) { + func.func @test_variable_tensor(%arg0: tensor<2x4x8xi32>) -> () { + // CHECK: %[[STORED_VAL:.*]] = tosa.variable_read @stored_var : tensor<2x4x8xi32> + %0 = tosa.variable_read 
@stored_var : tensor<2x4x8xi32> + // CHECK: %[[RESULT_ADD:.*]] = tosa.add %[[ADD_VAL]], %[[STORED_VAL]] : (tensor<2x4x8xi32>, tensor<2x4x8xi32>) -> tensor<2x4x8xi32> + %1 = "tosa.add"(%arg0, %0) : (tensor<2x4x8xi32>, tensor<2x4x8xi32>) -> tensor<2x4x8xi32> + // CHECK: tosa.variable_write @stored_var, %[[RESULT_ADD]] : tensor<2x4x8xi32> + tosa.variable_write @stored_var, %1 : tensor<2x4x8xi32> + return + } } // ----- -// CHECK-LABEL: @test_variable_scalar_no_initial_value( -// CHECK-SAME: %[[ADD_VAL:.*]]: tensor<f32>) { -func.func @test_variable_scalar_no_initial_value(%arg0: tensor<f32>) -> () { - // CHECK: tosa.variable @stored_var : tensor<f32> + +module { + // CHECK: tosa.variable @stored_var : tensor<f32> tosa.variable @stored_var : tensor<f32> - // CHECK: %[[STORED_VAL:.*]] = tosa.variable_read @stored_var : tensor<f32> - %0 = tosa.variable_read @stored_var : tensor<f32> - // CHECK: %[[RESULT_ADD:.*]] = tosa.add %[[ADD_VAL]], %[[STORED_VAL]] : (tensor<f32>, tensor<f32>) -> tensor<f32> - %1 = "tosa.add"(%arg0, %0) : (tensor<f32>, tensor<f32>) -> tensor<f32> - // CHECK: tosa.variable_write @stored_var, %[[RESULT_ADD]] : tensor<f32> - tosa.variable_write @stored_var, %1 : tensor<f32> - return + + // CHECK-LABEL: @test_variable_scalar_no_initial_value( + // CHECK-SAME: %[[ADD_VAL:.*]]: tensor<f32>) { + func.func @test_variable_scalar_no_initial_value(%arg0: tensor<f32>) -> () { + // CHECK: %[[STORED_VAL:.*]] = tosa.variable_read @stored_var : tensor<f32> + %0 = tosa.variable_read @stored_var : tensor<f32> + // CHECK: %[[RESULT_ADD:.*]] = tosa.add %[[ADD_VAL]], %[[STORED_VAL]] : (tensor<f32>, tensor<f32>) -> tensor<f32> + %1 = "tosa.add"(%arg0, %0) : (tensor<f32>, tensor<f32>) -> tensor<f32> + // CHECK: tosa.variable_write @stored_var, %[[RESULT_ADD]] : tensor<f32> + tosa.variable_write @stored_var, %1 : tensor<f32> + return + } } // ----- -// CHECK-LABEL: @test_variable_tensor_no_initial_value( -// CHECK-SAME: %[[ADD_VAL:.*]]: tensor<2x4x8xi32>) { -func.func @test_variable_tensor_no_initial_value(%arg0: tensor<2x4x8xi32>) -> () { - // CHECK: tosa.variable @stored_var : tensor<2x4x8xi32> + +module { + // CHECK: tosa.variable @stored_var : tensor<2x4x8xi32> tosa.variable @stored_var : tensor<2x4x8xi32> - // CHECK: %[[STORED_VAL:.*]] = tosa.variable_read @stored_var : tensor<2x4x8xi32> - %0 = tosa.variable_read @stored_var : tensor<2x4x8xi32> - // CHECK: %[[RESULT_ADD:.*]] = tosa.add %[[ADD_VAL]], %[[STORED_VAL]] : (tensor<2x4x8xi32>, tensor<2x4x8xi32>) -> tensor<2x4x8xi32> - %1 = "tosa.add"(%arg0, %0) : (tensor<2x4x8xi32>, tensor<2x4x8xi32>) -> tensor<2x4x8xi32> - // CHECK: tosa.variable_write @stored_var, %[[RESULT_ADD]] : tensor<2x4x8xi32> - tosa.variable_write @stored_var, %1 : tensor<2x4x8xi32> - return + + // CHECK-LABEL: @test_variable_tensor_no_initial_value( + // CHECK-SAME: %[[ADD_VAL:.*]]: tensor<2x4x8xi32>) { + func.func @test_variable_tensor_no_initial_value(%arg0: tensor<2x4x8xi32>) -> () { + // CHECK: %[[STORED_VAL:.*]] = tosa.variable_read @stored_var : tensor<2x4x8xi32> + %0 = tosa.variable_read @stored_var : tensor<2x4x8xi32> + // CHECK: %[[RESULT_ADD:.*]] = tosa.add %[[ADD_VAL]], %[[STORED_VAL]] : (tensor<2x4x8xi32>, tensor<2x4x8xi32>) -> tensor<2x4x8xi32> + %1 = "tosa.add"(%arg0, %0) : (tensor<2x4x8xi32>, tensor<2x4x8xi32>) -> tensor<2x4x8xi32> + // CHECK: tosa.variable_write @stored_var, %[[RESULT_ADD]] : tensor<2x4x8xi32> + tosa.variable_write @stored_var, %1 : tensor<2x4x8xi32> + return + } } + // ----- -// CHECK-LABEL: @test_variable_tensor_with_unknowns( -// 
CHECK-SAME: %[[ADD_VAL:.*]]: tensor<2x4x8xi32>) { -func.func @test_variable_tensor_with_unknowns(%arg0: tensor<2x4x8xi32>) -> () { - // CHECK: tosa.variable @stored_var : tensor<2x?x8xi32> + +module { + // CHECK: tosa.variable @stored_var : tensor<2x?x8xi32> tosa.variable @stored_var : tensor<2x?x8xi32> - // CHECK: %[[STORED_VAL:.*]] = tosa.variable_read @stored_var : tensor<2x4x8xi32> - %0 = tosa.variable_read @stored_var : tensor<2x4x8xi32> - // CHECK: %[[RESULT_ADD:.*]] = tosa.add %[[ADD_VAL]], %[[STORED_VAL]] : (tensor<2x4x8xi32>, tensor<2x4x8xi32>) -> tensor<2x4x8xi32> - %1 = "tosa.add"(%arg0, %0) : (tensor<2x4x8xi32>, tensor<2x4x8xi32>) -> tensor<2x4x8xi32> - // CHECK: tosa.variable_write @stored_var, %[[RESULT_ADD]] : tensor<2x4x8xi32> - tosa.variable_write @stored_var, %1 : tensor<2x4x8xi32> - return + + // CHECK-LABEL: @test_variable_tensor_with_unknowns( + // CHECK-SAME: %[[ADD_VAL:.*]]: tensor<2x4x8xi32>) { + func.func @test_variable_tensor_with_unknowns(%arg0: tensor<2x4x8xi32>) -> () { + // CHECK: %[[STORED_VAL:.*]] = tosa.variable_read @stored_var : tensor<2x4x8xi32> + %0 = tosa.variable_read @stored_var : tensor<2x4x8xi32> + // CHECK: %[[RESULT_ADD:.*]] = tosa.add %[[ADD_VAL]], %[[STORED_VAL]] : (tensor<2x4x8xi32>, tensor<2x4x8xi32>) -> tensor<2x4x8xi32> + %1 = "tosa.add"(%arg0, %0) : (tensor<2x4x8xi32>, tensor<2x4x8xi32>) -> tensor<2x4x8xi32> + // CHECK: tosa.variable_write @stored_var, %[[RESULT_ADD]] : tensor<2x4x8xi32> + tosa.variable_write @stored_var, %1 : tensor<2x4x8xi32> + return + } } diff --git a/mlir/test/Dialect/Tosa/verifier.mlir b/mlir/test/Dialect/Tosa/verifier.mlir index 0128da7..430b06a 100644 --- a/mlir/test/Dialect/Tosa/verifier.mlir +++ b/mlir/test/Dialect/Tosa/verifier.mlir @@ -944,29 +944,27 @@ func.func @test_while_loop_cond_output_not_bool(%arg0: tensor<10xi32>, %arg1: te // ----- -func.func @test_variable_multiple_declaration() -> () { +module { + // expected-note@below {{see existing symbol definition here}} tosa.variable @stored_var = dense<-1> : tensor<2x4x8xi32> - // expected-error@+1 {{'tosa.variable' op illegal to have multiple declaration of 'stored_var'}} + // expected-error@+1 {{redefinition of symbol named 'stored_var'}} tosa.variable @stored_var = dense<-3> : tensor<2x4x8xi32> - return } // ----- -func.func @test_variable_shape_mismatch() -> () { +module { // expected-error@+1 {{inferred shape of elements literal ([2]) does not match type ([3])}} tosa.variable @stored_var = dense<[3.14, 2.14]> : tensor<3xf32> // expected-error@+1 {{custom op 'tosa.variable' expected attribute}} - return } // ----- -func.func @test_variable_type_mismatch() -> () { +module { // expected-error@+1 {{expected integer elements, but parsed floating-point}} tosa.variable @stored_var = dense<-1.2> : tensor<2x4x8xi32> // expected-error@+1 {{custom op 'tosa.variable' expected attribute}} - return } // ----- @@ -979,20 +977,26 @@ func.func @test_variable_read_no_declaration() -> () { // ----- -func.func @test_variable_read_type_mismatch() -> () { +module { tosa.variable @stored_var = dense<-1.2> : tensor<2x4x8xf32> - // expected-error@+1 {{'tosa.variable_read' op require same element type for 'output1' ('i32') and the input tensor ('f32')}} - %0 = tosa.variable_read @stored_var : tensor<2x4x8xi32> - return + + func.func @test_variable_read_type_mismatch() -> () { + // expected-error@+1 {{'tosa.variable_read' op require same element type for 'output1' ('i32') and the input tensor ('f32')}} + %0 = tosa.variable_read @stored_var : tensor<2x4x8xi32> + return + } } // 
----- -func.func @test_variable_read_shape_mismatch() -> () { +module { tosa.variable @stored_var = dense<-1.2> : tensor<8x4x2xf32> - // expected-error@+1 {{'tosa.variable_read' op require same shapes for 'output1' ('tensor<2x4x8xf32>') and the input tensor ('tensor<8x4x2xf32>')}} - %0 = tosa.variable_read @stored_var : tensor<2x4x8xf32> - return + + func.func @test_variable_read_shape_mismatch() -> () { + // expected-error@+1 {{'tosa.variable_read' op require same shapes for 'output1' ('tensor<2x4x8xf32>') and the input tensor ('tensor<8x4x2xf32>')}} + %0 = tosa.variable_read @stored_var : tensor<2x4x8xf32> + return + } } // ----- @@ -1005,20 +1009,26 @@ func.func @test_variable_write_no_declaration(%arg0: tensor<f32>) -> () { // ----- -func.func @test_variable_write_type_mismatch(%arg0: tensor<2x4x8xi32>) -> () { +module { tosa.variable @stored_var = dense<-1.2> : tensor<2x4x8xf32> - // expected-error@+1 {{'tosa.variable_write' op require same element type for 'input1' ('i32') and the input tensor ('f32')}} - tosa.variable_write @stored_var, %arg0 : tensor<2x4x8xi32> - return + + func.func @test_variable_write_type_mismatch(%arg0: tensor<2x4x8xi32>) -> () { + // expected-error@+1 {{'tosa.variable_write' op require same element type for 'input1' ('i32') and the input tensor ('f32')}} + tosa.variable_write @stored_var, %arg0 : tensor<2x4x8xi32> + return + } } // ----- -func.func @test_variable_write_shape_mismatch(%arg0: tensor<2x4x8xf32>) -> () { +module { tosa.variable @stored_var = dense<-1.2> : tensor<8x4x2xf32> - // expected-error@+1 {{'tosa.variable_write' op require same shapes for 'input1' ('tensor<2x4x8xf32>') and the input tensor ('tensor<8x4x2xf32>')}} - tosa.variable_write @stored_var, %arg0 : tensor<2x4x8xf32> - return + + func.func @test_variable_write_shape_mismatch(%arg0: tensor<2x4x8xf32>) -> () { + // expected-error@+1 {{'tosa.variable_write' op require same shapes for 'input1' ('tensor<2x4x8xf32>') and the input tensor ('tensor<8x4x2xf32>')}} + tosa.variable_write @stored_var, %arg0 : tensor<2x4x8xf32> + return + } } // ----- diff --git a/mlir/test/Dialect/Transform/ops-invalid.mlir b/mlir/test/Dialect/Transform/ops-invalid.mlir index 71a260f..68305de 100644 --- a/mlir/test/Dialect/Transform/ops-invalid.mlir +++ b/mlir/test/Dialect/Transform/ops-invalid.mlir @@ -369,6 +369,7 @@ module attributes { transform.with_named_sequence } { // expected-error @below {{recursion not allowed in named sequences}} transform.named_sequence @self_recursion() -> () { transform.include @self_recursion failures(suppress) () : () -> () + transform.yield } } @@ -376,13 +377,13 @@ module attributes { transform.with_named_sequence } { module @mutual_recursion attributes { transform.with_named_sequence } { // expected-note @below {{operation on recursion stack}} - transform.named_sequence @foo(%arg0: !transform.any_op) -> () { + transform.named_sequence @foo(%arg0: !transform.any_op {transform.readonly}) -> () { transform.include @bar failures(suppress) (%arg0) : (!transform.any_op) -> () transform.yield } // expected-error @below {{recursion not allowed in named sequences}} - transform.named_sequence @bar(%arg0: !transform.any_op) -> () { + transform.named_sequence @bar(%arg0: !transform.any_op {transform.readonly}) -> () { transform.include @foo failures(propagate) (%arg0) : (!transform.any_op) -> () transform.yield } @@ -430,7 +431,7 @@ module attributes { transform.with_named_sequence } { // ----- module attributes { transform.with_named_sequence } { - transform.named_sequence @foo(%arg0: 
!transform.any_op) -> () { + transform.named_sequence @foo(%arg0: !transform.any_op {transform.readonly}) -> () { transform.yield } @@ -444,7 +445,7 @@ module attributes { transform.with_named_sequence } { // ----- module attributes { transform.with_named_sequence } { - transform.named_sequence @foo(%arg0: !transform.any_op) -> (!transform.any_op) { + transform.named_sequence @foo(%arg0: !transform.any_op {transform.readonly}) -> (!transform.any_op) { transform.yield %arg0 : !transform.any_op } @@ -458,7 +459,7 @@ module attributes { transform.with_named_sequence } { // ----- module attributes { transform.with_named_sequence } { - transform.named_sequence @foo(%arg0: !transform.any_op) -> (!transform.any_op) { + transform.named_sequence @foo(%arg0: !transform.any_op {transform.readonly}) -> (!transform.any_op) { transform.yield %arg0 : !transform.any_op } @@ -543,7 +544,6 @@ module attributes { transform.with_named_sequence } { // ----- module attributes { transform.with_named_sequence } { - // expected-error @below {{must provide consumed/readonly status for arguments of external or called ops}} transform.named_sequence @foo(%op: !transform.any_op) { transform.debug.emit_remark_at %op, "message" : !transform.any_op transform.yield @@ -551,6 +551,8 @@ module attributes { transform.with_named_sequence } { transform.sequence failures(propagate) { ^bb0(%arg0: !transform.any_op): + // expected-error @below {{TransformOpInterface requires memory effects on operands to be specified}} + // expected-note @below {{no effects specified for operand #0}} transform.include @foo failures(propagate) (%arg0) : (!transform.any_op) -> () transform.yield } @@ -908,3 +910,54 @@ module attributes { transform.with_named_sequence } { transform.yield } } + +// ----- + +module attributes { transform.with_named_sequence } { + transform.named_sequence @__transform_main(%arg0: !transform.any_op) -> () { + // Intentionally malformed func with no region. This shouldn't crash the + // verifier of `with_named_sequence` that runs before we get to the + // function. + // expected-error @below {{requires one region}} + "func.func"() : () -> () + transform.yield + } +} + +// ----- + +module attributes { transform.with_named_sequence } { + transform.named_sequence @__transform_main(%arg0: !transform.any_op) -> () { + // Intentionally malformed call with a region. This shouldn't crash the + // verifier of `with_named_sequence` that runs before we get to the call. + // expected-error @below {{requires zero regions}} + "func.call"() <{ + function_type = () -> (), + sym_name = "lambda_function" + }> ({ + ^bb0: + "func.return"() : () -> () + }) : () -> () + transform.yield + } +} + +// ----- + +module attributes { transform.with_named_sequence } { + // Intentionally malformed sequence where the verifier should not crash. 
+ // expected-error @below {{ op expects argument attribute array to have the same number of elements as the number of function arguments, got 1, but expected 3}} + "transform.named_sequence"() <{ + arg_attrs = [{transform.readonly}], + function_type = (i1, tensor<f32>, tensor<f32>) -> (), + sym_name = "print_message" + }> ({}) : () -> () + "transform.named_sequence"() <{ + function_type = (!transform.any_op) -> (), + sym_name = "reference_other_module" + }> ({ + ^bb0(%arg0: !transform.any_op): + "transform.include"(%arg0) <{target = @print_message}> : (!transform.any_op) -> () + "transform.yield"() : () -> () + }) : () -> () +} diff --git a/mlir/test/lib/Dialect/Test/TestInterfaces.td b/mlir/test/lib/Dialect/Test/TestInterfaces.td index d3d96ea5..3697e38 100644 --- a/mlir/test/lib/Dialect/Test/TestInterfaces.td +++ b/mlir/test/lib/Dialect/Test/TestInterfaces.td @@ -44,6 +44,16 @@ def TestTypeInterface InterfaceMethod<"Prints the type name.", "void", "printTypeC", (ins "::mlir::Location":$loc) >, + // Check that we can have multiple method with the same name. + InterfaceMethod<"Prints the type name, with a value prefixed.", + "void", "printTypeC", (ins "::mlir::Location":$loc, "int":$value) + >, + InterfaceMethod<"Prints the type name, with a value prefixed.", + "void", "printTypeC", (ins "::mlir::Location":$loc, "float":$value), + [{}], /*defaultImplementation=*/[{ + emitRemark(loc) << $_type << " - " << value << " - Float TestC"; + }] + >, // It should be possible to use the interface type name as result type // as well as in the implementation. InterfaceMethod<"Prints the type name and returns the type as interface.", diff --git a/mlir/test/lib/Dialect/Test/TestTypes.cpp b/mlir/test/lib/Dialect/Test/TestTypes.cpp index bea043f..614121f 100644 --- a/mlir/test/lib/Dialect/Test/TestTypes.cpp +++ b/mlir/test/lib/Dialect/Test/TestTypes.cpp @@ -245,6 +245,10 @@ void TestType::printTypeC(Location loc) const { emitRemark(loc) << *this << " - TestC"; } +void TestType::printTypeC(Location loc, int value) const { + emitRemark(loc) << *this << " - " << value << " - Int TestC"; +} + //===----------------------------------------------------------------------===// // TestTypeWithLayout //===----------------------------------------------------------------------===// diff --git a/mlir/test/lib/IR/TestInterfaces.cpp b/mlir/test/lib/IR/TestInterfaces.cpp index 2dd3fe2..881019d 100644 --- a/mlir/test/lib/IR/TestInterfaces.cpp +++ b/mlir/test/lib/IR/TestInterfaces.cpp @@ -31,6 +31,8 @@ struct TestTypeInterfaces testInterface.printTypeA(op->getLoc()); testInterface.printTypeB(op->getLoc()); testInterface.printTypeC(op->getLoc()); + testInterface.printTypeC(op->getLoc(), 42); + testInterface.printTypeC(op->getLoc(), 3.14f); testInterface.printTypeD(op->getLoc()); // Just check that we can assign the result to a variable of interface // type. 
diff --git a/mlir/test/mlir-tblgen/interfaces.mlir b/mlir/test/mlir-tblgen/interfaces.mlir index 5c1ec61..b5d694f 100644 --- a/mlir/test/mlir-tblgen/interfaces.mlir +++ b/mlir/test/mlir-tblgen/interfaces.mlir @@ -3,6 +3,8 @@ // expected-remark@below {{'!test.test_type' - TestA}} // expected-remark@below {{'!test.test_type' - TestB}} // expected-remark@below {{'!test.test_type' - TestC}} +// expected-remark@below {{'!test.test_type' - 42 - Int TestC}} +// expected-remark@below {{'!test.test_type' - 3.140000e+00 - Float TestC}} // expected-remark@below {{'!test.test_type' - TestD}} // expected-remark@below {{'!test.test_type' - TestRet}} // expected-remark@below {{'!test.test_type' - TestE}} diff --git a/mlir/test/python/dialects/python_test.py b/mlir/test/python/dialects/python_test.py index 1194e32..5a9acc7 100644 --- a/mlir/test/python/dialects/python_test.py +++ b/mlir/test/python/dialects/python_test.py @@ -1,5 +1,4 @@ -# RUN: %PYTHON %s pybind11 | FileCheck %s -# RUN: %PYTHON %s nanobind | FileCheck %s +# RUN: %PYTHON %s | FileCheck %s import sys import typing from typing import Union, Optional @@ -10,26 +9,14 @@ import mlir.dialects.python_test as test import mlir.dialects.tensor as tensor import mlir.dialects.arith as arith -if sys.argv[1] == "pybind11": - from mlir._mlir_libs._mlirPythonTestPybind11 import ( - TestAttr, - TestType, - TestTensorValue, - TestIntegerRankedTensorType, - ) - - test.register_python_test_dialect(get_dialect_registry(), use_nanobind=False) -elif sys.argv[1] == "nanobind": - from mlir._mlir_libs._mlirPythonTestNanobind import ( - TestAttr, - TestType, - TestTensorValue, - TestIntegerRankedTensorType, - ) - - test.register_python_test_dialect(get_dialect_registry(), use_nanobind=True) -else: - raise ValueError("Expected pybind11 or nanobind as argument") +from mlir._mlir_libs._mlirPythonTestNanobind import ( + TestAttr, + TestType, + TestTensorValue, + TestIntegerRankedTensorType, +) + +test.register_python_test_dialect(get_dialect_registry()) def run(f): diff --git a/mlir/test/python/lib/CMakeLists.txt b/mlir/test/python/lib/CMakeLists.txt index 9a813da..f51a7b4 100644 --- a/mlir/test/python/lib/CMakeLists.txt +++ b/mlir/test/python/lib/CMakeLists.txt @@ -1,7 +1,6 @@ set(LLVM_OPTIONAL_SOURCES PythonTestCAPI.cpp PythonTestDialect.cpp - PythonTestModulePybind11.cpp PythonTestModuleNanobind.cpp ) diff --git a/mlir/test/python/lib/PythonTestModulePybind11.cpp b/mlir/test/python/lib/PythonTestModulePybind11.cpp deleted file mode 100644 index 94a5f51..0000000 --- a/mlir/test/python/lib/PythonTestModulePybind11.cpp +++ /dev/null @@ -1,118 +0,0 @@ -//===- PythonTestModule.cpp - Python extension for the PythonTest dialect -===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// This is the pybind11 edition of the PythonTest dialect module. 
-//===----------------------------------------------------------------------===// - -#include "PythonTestCAPI.h" -#include "mlir-c/BuiltinAttributes.h" -#include "mlir-c/BuiltinTypes.h" -#include "mlir-c/IR.h" -#include "mlir/Bindings/Python/PybindAdaptors.h" - -namespace py = pybind11; -using namespace mlir::python::adaptors; -using namespace pybind11::literals; - -static bool mlirTypeIsARankedIntegerTensor(MlirType t) { - return mlirTypeIsARankedTensor(t) && - mlirTypeIsAInteger(mlirShapedTypeGetElementType(t)); -} - -PYBIND11_MODULE(_mlirPythonTestPybind11, m) { - m.def( - "register_python_test_dialect", - [](MlirContext context, bool load) { - MlirDialectHandle pythonTestDialect = - mlirGetDialectHandle__python_test__(); - mlirDialectHandleRegisterDialect(pythonTestDialect, context); - if (load) { - mlirDialectHandleLoadDialect(pythonTestDialect, context); - } - }, - py::arg("context"), py::arg("load") = true); - - m.def( - "register_dialect", - [](MlirDialectRegistry registry) { - MlirDialectHandle pythonTestDialect = - mlirGetDialectHandle__python_test__(); - mlirDialectHandleInsertDialect(pythonTestDialect, registry); - }, - py::arg("registry")); - - mlir_attribute_subclass(m, "TestAttr", - mlirAttributeIsAPythonTestTestAttribute, - mlirPythonTestTestAttributeGetTypeID) - .def_classmethod( - "get", - [](const py::object &cls, MlirContext ctx) { - return cls(mlirPythonTestTestAttributeGet(ctx)); - }, - py::arg("cls"), py::arg("context") = py::none()); - - mlir_type_subclass(m, "TestType", mlirTypeIsAPythonTestTestType, - mlirPythonTestTestTypeGetTypeID) - .def_classmethod( - "get", - [](const py::object &cls, MlirContext ctx) { - return cls(mlirPythonTestTestTypeGet(ctx)); - }, - py::arg("cls"), py::arg("context") = py::none()); - - auto typeCls = - mlir_type_subclass(m, "TestIntegerRankedTensorType", - mlirTypeIsARankedIntegerTensor, - py::module::import(MAKE_MLIR_PYTHON_QUALNAME("ir")) - .attr("RankedTensorType")) - .def_classmethod( - "get", - [](const py::object &cls, std::vector<int64_t> shape, - unsigned width, MlirContext ctx) { - MlirAttribute encoding = mlirAttributeGetNull(); - return cls(mlirRankedTensorTypeGet( - shape.size(), shape.data(), mlirIntegerTypeGet(ctx, width), - encoding)); - }, - "cls"_a, "shape"_a, "width"_a, "context"_a = py::none()); - - assert(py::hasattr(typeCls.get_class(), "static_typeid") && - "TestIntegerRankedTensorType has no static_typeid"); - - MlirTypeID mlirRankedTensorTypeID = mlirRankedTensorTypeGetTypeID(); - - py::module::import(MAKE_MLIR_PYTHON_QUALNAME("ir")) - .attr(MLIR_PYTHON_CAPI_TYPE_CASTER_REGISTER_ATTR)(mlirRankedTensorTypeID, - "replace"_a = true)( - pybind11::cpp_function([typeCls](const py::object &mlirType) { - return typeCls.get_class()(mlirType); - })); - - auto valueCls = mlir_value_subclass(m, "TestTensorValue", - mlirTypeIsAPythonTestTestTensorValue) - .def("is_null", [](MlirValue &self) { - return mlirValueIsNull(self); - }); - - py::module::import(MAKE_MLIR_PYTHON_QUALNAME("ir")) - .attr(MLIR_PYTHON_CAPI_VALUE_CASTER_REGISTER_ATTR)( - mlirRankedTensorTypeID)( - pybind11::cpp_function([valueCls](const py::object &valueObj) { - py::object capsule = mlirApiObjectToCapsule(valueObj); - MlirValue v = mlirPythonCapsuleToValue(capsule.ptr()); - MlirType t = mlirValueGetType(v); - // This is hyper-specific in order to exercise/test registering a - // value caster from cpp (but only for a single test case; see - // testTensorValue python_test.py). 
- if (mlirShapedTypeHasStaticShape(t) && - mlirShapedTypeGetDimSize(t, 0) == 1 && - mlirShapedTypeGetDimSize(t, 1) == 2 && - mlirShapedTypeGetDimSize(t, 2) == 3) - return valueCls.get_class()(valueObj); - return valueObj; - })); -} |
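Note on the Transform-dialect hunks above (ops-invalid.mlir): named_sequence arguments now carry explicit effect annotations, and the missing-effects diagnostic is reported at the transform.include call site instead of on the callee. A condensed sketch, assuming the annotated @foo from those hunks, of the form that verifies cleanly:

    module attributes { transform.with_named_sequence } {
      // Callee arguments carry an explicit effect annotation.
      transform.named_sequence @foo(%arg0: !transform.any_op {transform.readonly}) -> () {
        transform.debug.emit_remark_at %arg0, "message" : !transform.any_op
        transform.yield
      }

      transform.sequence failures(propagate) {
      ^bb0(%arg0: !transform.any_op):
        transform.include @foo failures(propagate) (%arg0) : (!transform.any_op) -> ()
        transform.yield
      }
    }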