author    Andrzej WarzyƄski <andrzej.warzynski@arm.com>  2024-03-22 15:53:04 +0000
committer GitHub <noreply@github.com>  2024-03-22 15:53:04 +0000
commit    01b1b0c1f728e2c2639edc654424f50830295989 (patch)
tree      b991d9e0b4b5d8c9e7e0149e848b597dbe4709ef /mlir
parent    631e54aa1a0b7a79d0dec8dce7ec0f5e506acf6c (diff)
[mlir][SVE] Add e2e for 1D depthwise WC convolution (#85225)
Follow-up for https://github.com/llvm/llvm-project/pull/81625
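
For reference, a sketch of the op's semantics (assuming the default unit stride and dilation): linalg.depthwise_conv_1d_nwc_wc convolves each channel independently, i.e. for every (n, w, c):

    output[n, w, c] += input[n, w + kw, c] * filter[kw, c]    (summed over kw)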
Diffstat (limited to 'mlir')
-rw-r--r--  mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/1d-depthwise-conv.mlir  |  60
1 file changed, 60 insertions(+), 0 deletions(-)
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/1d-depthwise-conv.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/1d-depthwise-conv.mlir
new file mode 100644
index 0000000..57d6938
--- /dev/null
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSVE/1d-depthwise-conv.mlir
@@ -0,0 +1,60 @@
+// DEFINE: %{compile} = mlir-opt %s \
+// DEFINE: -transform-interpreter -test-transform-dialect-erase-schedule \
+// DEFINE: -one-shot-bufferize="bufferize-function-boundaries" -lower-vector-mask -cse -canonicalize -convert-vector-to-scf -arm-sve-legalize-vector-storage \
+// DEFINE: -convert-vector-to-llvm="enable-arm-sve" -test-lower-to-llvm -o %t
+// DEFINE: %{entry_point} = conv
+// DEFINE: %{run} = %mcr_aarch64_cmd %t -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve" \
+// DEFINE: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils
+
+// RUN: %{compile} && %{run} | FileCheck %s
+
+func.func @conv() {
+ // Define input/output tensors
+ %input_init = tensor.empty() : tensor<1x8x6xi32>
+ %output_init = tensor.empty() : tensor<1x7x6xi32>
+
+ %five = arith.constant 5 : i32
+ %zero = arith.constant 0 : i32
+ %input = linalg.fill ins(%five : i32) outs(%input_init : tensor<1x8x6xi32>) -> tensor<1x8x6xi32>
+ %output = linalg.fill ins(%zero : i32) outs(%output_init : tensor<1x7x6xi32>) -> tensor<1x7x6xi32>
+
+ // Define the filter tensor
+ %filter = arith.constant dense<[
+ [ 1, 2, 3, 4, 5, 6],
+ [ 11, 12, 13, 14, 15, 16]
+ ]> : tensor<2x6xi32>
+
+ // static sizes -> dynamic sizes
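+ // (the dynamic channel dim forces the vectorizer below to emit masked,
+ // scalable vector operations rather than fixed-width ones)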
+ %input_dyn = tensor.cast %input : tensor<1x8x6xi32> to tensor<1x8x?xi32>
+ %output_dyn = tensor.cast %output : tensor<1x7x6xi32> to tensor<1x7x?xi32>
+ %filter_dyn = tensor.cast %filter : tensor<2x6xi32> to tensor<2x?xi32>
+
+ // Run the convolution
+ %res = linalg.depthwise_conv_1d_nwc_wc
+ ins(%input_dyn, %filter_dyn : tensor<1x8x?xi32>, tensor<2x?xi32>)
+ outs(%output_dyn : tensor<1x7x?xi32>) -> tensor<1x7x?xi32>
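+
+ // Expected values: every input element is 5 and the filter has two taps per
+ // channel, so each output element in channel c is
+ // 5 * (filter[0][c] + filter[1][c]), e.g. 5 * (1 + 11) = 60 for c = 0 and
+ // 5 * (6 + 16) = 110 for c = 5.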
+
+ // Print the results
+ // CHECK: SVE: START OF TEST OUTPUT
+ vector.print str "SVE: START OF TEST OUTPUT\n"
+
+ // CHECK-NEXT: Unranked Memref base@ = {{.*}} rank = 3 offset = 0 sizes = [1, 7, 6] strides = [42, 6, 1] data =
+ // CHECK-COUNT-7: [60, 70, 80, 90, 100, 110]
+ %xf = tensor.cast %res : tensor<1x7x?xi32> to tensor<*xi32>
+ call @printMemrefI32(%xf) : (tensor<*xi32>) -> ()
+
+ // CHECK-NEXT: SVE: END OF TEST OUTPUT
+ vector.print str "SVE: END OF TEST OUTPUT\n"
+
+ return
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["linalg.depthwise_conv_1d_nwc_wc"]} in %arg0 : (!transform.any_op) -> !transform.any_op
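+ // Vector sizes correspond to the (n, w, c, kw) iteration space of the op;
+ // [8] makes the channel dimension scalable (i.e. vscale x 8 lanes).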
+ transform.structured.vectorize %0 vector_sizes [1, 7, [8], 2] : !transform.any_op
+ transform.yield
+ }
+}
+
+func.func private @printMemrefI32(%ptr : tensor<*xi32>) attributes { llvm.emit_c_interface }