diff options
author | Aart Bik <ajcbik@google.com> | 2024-06-11 14:20:58 -0700 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-06-11 14:20:58 -0700 |
commit | 438a7d4c982e0a38aaa6544a5ba6736d54600733 (patch) | |
tree | 52e097e308b24ed41606c47db74f60524aacfa20 /mlir/test/Dialect | |
parent | c6ee5628a75feeb4fccc8272a68eb8303fb1734b (diff) | |
download | llvm-438a7d4c982e0a38aaa6544a5ba6736d54600733.zip llvm-438a7d4c982e0a38aaa6544a5ba6736d54600733.tar.gz llvm-438a7d4c982e0a38aaa6544a5ba6736d54600733.tar.bz2 |
[mlir][sparse] expose optimization flags to mini pipeline (#95158)
Some of the options were only fed into the full sparse pipeline. However,
some backends prefer to use the sparse mini pipeline. This change exposes
some important optimization flags to that pass as well. This prepares for
SIMDization of PyTorch-sparsified code.
Diffstat (limited to 'mlir/test/Dialect')
-rwxr-xr-x | mlir/test/Dialect/SparseTensor/minipipeline_vector.mlir | 43 |
1 file changed, 43 insertions, 0 deletions
diff --git a/mlir/test/Dialect/SparseTensor/minipipeline_vector.mlir b/mlir/test/Dialect/SparseTensor/minipipeline_vector.mlir new file mode 100755 index 0000000..2475aa5 --- /dev/null +++ b/mlir/test/Dialect/SparseTensor/minipipeline_vector.mlir @@ -0,0 +1,43 @@ +// RUN: mlir-opt %s --sparsification-and-bufferization | FileCheck %s --check-prefix=CHECK-NOVEC +// RUN: mlir-opt %s --sparsification-and-bufferization="vl=8" | FileCheck %s --check-prefix=CHECK-VEC + +// Test to ensure we can pass optimization flags into +// the mini sparsification and bufferization pipeline. + +#SV = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }> + +#trait_sum_reduction = { + indexing_maps = [ + affine_map<(i) -> (i)>, // a + affine_map<(i) -> ()> // x (scalar out) + ], + iterator_types = ["reduction"], + doc = "x += SUM_i a(i)" +} + +// +// CHECK-NOVEC-LABEL: func.func @sum_reduction +// CHECK-NOVEC: scf.for +// CHECK-NOVEC: arith.addf %{{.*}} %{{.*}} : f32 +// CHECK-NOVEC: } +// +// CHECK-VEC-LABEL: func.func @sum_reduction +// CHECK-VEC: vector.insertelement +// CHECK-VEC: scf.for +// CHECK-VEC: vector.create_mask +// CHECK-VEC: vector.maskedload +// CHECK-VEC: arith.addf %{{.*}} %{{.*}} : vector<8xf32> +// CHECK-VEC: } +// CHECK-VEC: vector.reduction <add> +// +func.func @sum_reduction(%arga: tensor<?xf32, #SV>, + %argx: tensor<f32>) -> tensor<f32> { + %0 = linalg.generic #trait_sum_reduction + ins(%arga: tensor<?xf32, #SV>) + outs(%argx: tensor<f32>) { + ^bb(%a: f32, %x: f32): + %0 = arith.addf %x, %a : f32 + linalg.yield %0 : f32 + } -> tensor<f32> + return %0 : tensor<f32> +} |