From 996905d8152def16ca2fa1322367e493ac6eef5e Mon Sep 17 00:00:00 2001
From: Peiming Liu
Date: Mon, 17 Jun 2024 11:35:23 -0700
Subject: Revert "[mlir][sparse] implement lowering rules for IterateOp." (#95826)

Reverts llvm/llvm-project#95286
---
 .../SparseTensor/sparse_iteration_to_scf.mlir | 54 ++++++----------------
 1 file changed, 13 insertions(+), 41 deletions(-)

(limited to 'mlir/test')

diff --git a/mlir/test/Dialect/SparseTensor/sparse_iteration_to_scf.mlir b/mlir/test/Dialect/SparseTensor/sparse_iteration_to_scf.mlir
index 77a0e89..5fcd661 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_iteration_to_scf.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_iteration_to_scf.mlir
@@ -1,5 +1,4 @@
 // RUN: mlir-opt %s --lower-sparse-iteration-to-scf | FileCheck %s
-// RUN: mlir-opt %s --sparse-space-collapse --lower-sparse-iteration-to-scf | FileCheck %s --check-prefix COLLAPSED
 
 #COO = #sparse_tensor.encoding<{
   map = (i, j) -> (
@@ -8,44 +7,17 @@
   )
 }>
 
-// CHECK-LABEL: @sparse_iteration_to_scf
-// // deduplication
-// CHECK: scf.while {{.*}} {
-// CHECK: } do {
-// CHECK: }
-// CHECK: scf.while {{.*}} {
-// CHECK: } do {
-// // actual computation
-// CHECK: scf.for {{.*}} {
-// CHECK: arith.addi
-// CHECK: }
-// // deduplication
-// CHECK: scf.while {{.*}} {
-// CHECK: } do {
-// CHECK: }
-// CHECK: scf.yield
-// CHECK: }
-// CHECK: return
-
-// COLLAPSED-LABEL: @sparse_iteration_to_scf
-// COLLAPSED: %[[RET:.*]] = scf.for {{.*}} {
-// COLLAPSED: %[[VAL:.*]] = arith.addi
-// COLLAPSED: scf.yield %[[VAL]] : index
-// COLLAPSED: }
-// COLLAPSED: return %[[RET]] : index
-func.func @sparse_iteration_to_scf(%sp : tensor<4x8xf32, #COO>) -> index {
-  %i = arith.constant 0 : index
-  %c1 = arith.constant 1 : index
-  %l1 = sparse_tensor.extract_iteration_space %sp lvls = 0
-      : tensor<4x8xf32, #COO> -> !sparse_tensor.iter_space<#COO, lvls = 0>
-  %r1 = sparse_tensor.iterate %it1 in %l1 iter_args(%outer = %i): !sparse_tensor.iter_space<#COO, lvls = 0 to 1> -> index {
-    %l2 = sparse_tensor.extract_iteration_space %sp at %it1 lvls = 1
-        : tensor<4x8xf32, #COO>, !sparse_tensor.iterator<#COO, lvls = 0 to 1> -> !sparse_tensor.iter_space<#COO, lvls = 1>
-    %r2 = sparse_tensor.iterate %it2 in %l2 iter_args(%inner = %outer): !sparse_tensor.iter_space<#COO, lvls = 1 to 2> -> index {
-      %k = arith.addi %inner, %c1 : index
-      sparse_tensor.yield %k : index
-    }
-    sparse_tensor.yield %r2 : index
-  }
-  return %r1 : index
+// CHECK-LABEL: func.func @sparse_1D_space(
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf32, #sparse{{[0-9]*}}>) -> !sparse_tensor.iter_space<#sparse{{[0-9]*}}, lvls = 0> {
+// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
+// CHECK-DAG: %[[LVL_SIZE:.*]] = sparse_tensor.lvl %[[VAL_0]], %[[C0]] : tensor<?x?xf32, #sparse{{[0-9]*}}>
+// CHECK: %[[POS_MEM:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<?x?xf32, #sparse{{[0-9]*}}> to memref<?xindex>
+// CHECK: %[[CRD_MEM:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<?x?xf32, #sparse{{[0-9]*}}> to memref<?xindex>
+// CHECK: %[[POS_LO:.*]] = memref.load %[[POS_MEM]]{{\[}}%[[C0]]] : memref<?xindex>
+// CHECK: %[[POS_HI:.*]] = memref.load %[[POS_MEM]]{{\[}}%[[C1]]] : memref<?xindex>
+// CHECK: %[[ITER_SPACE:.*]] = builtin.unrealized_conversion_cast %[[POS_MEM]], %[[CRD_MEM]], %[[LVL_SIZE]], %[[POS_LO]], %[[POS_HI]]
+func.func @sparse_1D_space(%sp : tensor<?x?xf32, #COO>) -> !sparse_tensor.iter_space<#COO, lvls = 0> {
+  %l1 = sparse_tensor.extract_iteration_space %sp lvls = 0 : tensor<?x?xf32, #COO> -> !sparse_tensor.iter_space<#COO, lvls = 0>
+  return %l1 : !sparse_tensor.iter_space<#COO, lvls = 0>
 }