author    Aart Bik <ajcbik@google.com>    2022-08-26 12:31:22 -0700
committer Aart Bik <ajcbik@google.com>    2022-08-26 13:17:24 -0700
commit    a8166d8801258fe793c81dd5d6c5f11e67fea280 (patch)
tree      ffd57e3b481d1b6a6d534d1e04c1d67fc6ba744f
parent    59d246e55f56799ef47dcaae9788c0c3e77a2244 (diff)
[mlir][sparse] move sparse2sparse conversion to own test file
Rationale:
We were running *all* conversion tests twice, just to check the difference
in one individual test in that file. By splitting that test out, we have a
much more focused testing setup.

Reviewed By: bixia

Differential Revision: https://reviews.llvm.org/D132757
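With the split, the sparse2sparse strategy checks can be exercised on their own. A minimal sketch of running only the new file through lit, assuming an already configured LLVM/MLIR build directory (the build-directory layout and relative source path below are assumptions, not part of this change):

    # from the build directory: run only the new focused test file
    bin/llvm-lit -v ../llvm-project/mlir/test/Dialect/SparseTensor/conversion_sparse2sparse.mlir

    # or run the whole SparseTensor test directory
    bin/llvm-lit -v ../llvm-project/mlir/test/Dialect/SparseTensor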
-rw-r--r--  mlir/test/Dialect/SparseTensor/conversion.mlir                 26
-rw-r--r--  mlir/test/Dialect/SparseTensor/conversion_sparse2sparse.mlir   49
2 files changed, 52 insertions(+), 23 deletions(-)
diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir
index 80cd2f0..336d815 100644
--- a/mlir/test/Dialect/SparseTensor/conversion.mlir
+++ b/mlir/test/Dialect/SparseTensor/conversion.mlir
@@ -1,10 +1,4 @@
-// First use with `kViaCOO` for sparse2sparse conversion (the old way).
-// RUN: mlir-opt %s --sparse-tensor-conversion="s2s-strategy=1" \
-// RUN: --canonicalize --cse | FileCheck %s
-//
-// Now again with `kAuto` (the new default).
-// RUN: mlir-opt %s --sparse-tensor-conversion="s2s-strategy=0" \
-// RUN: --canonicalize --cse | FileCheck %s -check-prefix=CHECKAUTO
+// RUN: mlir-opt %s --sparse-tensor-conversion --canonicalize --cse | FileCheck %s
#SparseVector = #sparse_tensor.encoding<{
dimLevelType = ["compressed"]
@@ -233,29 +227,15 @@ func.func @sparse_convert_complex(%arg0: tensor<100xcomplex<f64>>) -> tensor<100
// CHECK-LABEL: func @sparse_convert_1d_ss(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
-// CHECK-DAG: %[[ToCOO:.*]] = arith.constant 5 : i32
-// CHECK-DAG: %[[FromCOO:.*]] = arith.constant 2 : i32
+// CHECK-DAG: %[[SparseToSparse:.*]] = arith.constant 3 : i32
// CHECK-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
// CHECK-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
// CHECK-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
// CHECK-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
// CHECK-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
// CHECK-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
-// CHECK: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ToCOO]], %[[A]])
-// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
-// CHECK: call @delSparseTensorCOOF32(%[[C]])
+// CHECK: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[SparseToSparse]], %[[A]])
// CHECK: return %[[T]] : !llvm.ptr<i8>
-// CHECKAUTO-LABEL: func @sparse_convert_1d_ss(
-// CHECKAUTO-SAME: %[[A:.*]]: !llvm.ptr<i8>)
-// CHECKAUTO-DAG: %[[SparseToSparse:.*]] = arith.constant 3 : i32
-// CHECKAUTO-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
-// CHECKAUTO-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
-// CHECKAUTO-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
-// CHECKAUTO-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
-// CHECKAUTO-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
-// CHECKAUTO-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
-// CHECKAUTO: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[SparseToSparse]], %[[A]])
-// CHECKAUTO: return %[[T]] : !llvm.ptr<i8>
func.func @sparse_convert_1d_ss(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32, #SparseVector32> {
%0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector64> to tensor<?xf32, #SparseVector32>
return %0 : tensor<?xf32, #SparseVector32>
diff --git a/mlir/test/Dialect/SparseTensor/conversion_sparse2sparse.mlir b/mlir/test/Dialect/SparseTensor/conversion_sparse2sparse.mlir
new file mode 100644
index 0000000..488f19c
--- /dev/null
+++ b/mlir/test/Dialect/SparseTensor/conversion_sparse2sparse.mlir
@@ -0,0 +1,49 @@
+// First use with `kViaCOO` for sparse2sparse conversion (the old way).
+// RUN: mlir-opt %s --sparse-tensor-conversion="s2s-strategy=1" \
+// RUN: --canonicalize --cse | FileCheck %s -check-prefix=CHECK-COO
+//
+// Now again with `kAuto` (the new default).
+// RUN: mlir-opt %s --sparse-tensor-conversion="s2s-strategy=0" \
+// RUN: --canonicalize --cse | FileCheck %s -check-prefix=CHECK-AUTO
+
+#SparseVector64 = #sparse_tensor.encoding<{
+ dimLevelType = ["compressed"],
+ pointerBitWidth = 64,
+ indexBitWidth = 64
+}>
+
+#SparseVector32 = #sparse_tensor.encoding<{
+ dimLevelType = ["compressed"],
+ pointerBitWidth = 32,
+ indexBitWidth = 32
+}>
+
+// CHECK-COO-LABEL: func @sparse_convert(
+// CHECK-COO-SAME: %[[A:.*]]: !llvm.ptr<i8>)
+// CHECK-COO-DAG: %[[ToCOO:.*]] = arith.constant 5 : i32
+// CHECK-COO-DAG: %[[FromCOO:.*]] = arith.constant 2 : i32
+// CHECK-COO-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
+// CHECK-COO-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
+// CHECK-COO-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
+// CHECK-COO-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
+// CHECK-COO-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
+// CHECK-COO-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
+// CHECK-COO: %[[C:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[ToCOO]], %[[A]])
+// CHECK-COO: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[FromCOO]], %[[C]])
+// CHECK-COO: call @delSparseTensorCOOF32(%[[C]])
+// CHECK-COO: return %[[T]] : !llvm.ptr<i8>
+// CHECK-AUTO-LABEL: func @sparse_convert(
+// CHECK-AUTO-SAME: %[[A:.*]]: !llvm.ptr<i8>)
+// CHECK-AUTO-DAG: %[[SparseToSparse:.*]] = arith.constant 3 : i32
+// CHECK-AUTO-DAG: %[[P:.*]] = memref.alloca() : memref<1xi8>
+// CHECK-AUTO-DAG: %[[Q:.*]] = memref.alloca() : memref<1xindex>
+// CHECK-AUTO-DAG: %[[R:.*]] = memref.alloca() : memref<1xindex>
+// CHECK-AUTO-DAG: %[[X:.*]] = memref.cast %[[P]] : memref<1xi8> to memref<?xi8>
+// CHECK-AUTO-DAG: %[[Y:.*]] = memref.cast %[[Q]] : memref<1xindex> to memref<?xindex>
+// CHECK-AUTO-DAG: %[[Z:.*]] = memref.cast %[[R]] : memref<1xindex> to memref<?xindex>
+// CHECK-AUTO: %[[T:.*]] = call @newSparseTensor(%[[X]], %[[Y]], %[[Z]], %{{.*}}, %{{.*}}, %{{.*}}, %[[SparseToSparse]], %[[A]])
+// CHECK-AUTO: return %[[T]] : !llvm.ptr<i8>
+func.func @sparse_convert(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32, #SparseVector32> {
+ %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector64> to tensor<?xf32, #SparseVector32>
+ return %0 : tensor<?xf32, #SparseVector32>
+}