// mlir/test/Dialect/SparseTensor/fold.mlir

// RUN: mlir-opt %s --canonicalize --cse | FileCheck %s

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

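// A convert between two identical dense tensor types is a no-op and folds
// to its argument.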
// CHECK-LABEL: func @sparse_nop_dense2dense_convert(
//  CHECK-SAME: %[[A:.*]]: tensor<64xf32>)
//   CHECK-NOT: sparse_tensor.convert
//       CHECK: return %[[A]] : tensor<64xf32>
func.func @sparse_nop_dense2dense_convert(%arg0: tensor<64xf32>) -> tensor<64xf32> {
  %0 = sparse_tensor.convert %arg0 : tensor<64xf32> to tensor<64xf32>
  return %0 : tensor<64xf32>
}

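// A convert whose result is unused has no side effects, so dead code
// elimination removes it.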
// CHECK-LABEL: func @sparse_dce_convert(
//  CHECK-SAME: %[[A:.*]]: tensor<64xf32>)
//   CHECK-NOT: sparse_tensor.convert
//       CHECK: return
func.func @sparse_dce_convert(%arg0: tensor<64xf32>) {
  %0 = sparse_tensor.convert %arg0 : tensor<64xf32> to tensor<64xf32, #SparseVector>
  return
}

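// Unused positions/coordinates/values getters are side-effect free and are
// removed by dead code elimination.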
// CHECK-LABEL: func @sparse_dce_getters(
//  CHECK-SAME: %[[A:.*]]: tensor<64xf32, #sparse{{[0-9]*}}>)
//   CHECK-NOT: sparse_tensor.positions
//   CHECK-NOT: sparse_tensor.coordinates
//   CHECK-NOT: sparse_tensor.values
//       CHECK: return
func.func @sparse_dce_getters(%arg0: tensor<64xf32, #SparseVector>) {
  %0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<64xf32, #SparseVector> to memref<?xindex>
  %1 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<64xf32, #SparseVector> to memref<?xindex>
  %2 = sparse_tensor.values %arg0 : tensor<64xf32, #SparseVector> to memref<?xf32>
  return
}

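// An unused concatenation has no side effects, so dead code elimination
// removes it.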
// CHECK-LABEL: func @sparse_concat_dce(
//   CHECK-NOT: sparse_tensor.concatenate
//       CHECK: return
func.func @sparse_concat_dce(%arg0: tensor<2xf64, #SparseVector>,
                             %arg1: tensor<3xf64, #SparseVector>,
                             %arg2: tensor<4xf64, #SparseVector>) {
  %0 = sparse_tensor.concatenate %arg0, %arg1, %arg2 {dimension = 0 : index}
       : tensor<2xf64, #SparseVector>,
         tensor<3xf64, #SparseVector>,
         tensor<4xf64, #SparseVector> to tensor<9xf64, #SparseVector>
  return
}

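// A get of lvl_sz at level 0 folds to the value most recently set for that
// field (%arg1); the intervening set of pos_mem_sz touches a different field
// and does not interfere. Both now-unused sets are then eliminated.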
// CHECK-LABEL: func @sparse_get_specifier_dce_fold(
//  CHECK-SAME:  %[[A0:.*]]: !sparse_tensor.storage_specifier
//  CHECK-SAME:  %[[A1:.*]]: index,
//  CHECK-SAME:  %[[A2:.*]]: index)
//   CHECK-NOT:  sparse_tensor.storage_specifier.set
//   CHECK-NOT:  sparse_tensor.storage_specifier.get
//       CHECK:  return %[[A1]]
func.func @sparse_get_specifier_dce_fold(%arg0: !sparse_tensor.storage_specifier<#SparseVector>, %arg1: index, %arg2: index) -> index {
  %0 = sparse_tensor.storage_specifier.set %arg0 lvl_sz at 0 with %arg1
       : !sparse_tensor.storage_specifier<#SparseVector>
  %1 = sparse_tensor.storage_specifier.set %0 pos_mem_sz at 0 with %arg2
       : !sparse_tensor.storage_specifier<#SparseVector>
  %2 = sparse_tensor.storage_specifier.get %1 lvl_sz at 0
       : !sparse_tensor.storage_specifier<#SparseVector>
  return %2 : index
}

#COO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)}>

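// Source and result share the same (ordered) COO encoding, so the reordering
// is a no-op and the op folds to its operand.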
// CHECK-LABEL: func @sparse_reorder_coo(
//  CHECK-SAME: %[[A:.*]]: tensor<?x?xf32, #sparse{{[0-9]*}}>
//   CHECK-NOT: %[[R:.*]] = sparse_tensor.reorder_coo
//       CHECK: return %[[A]]
func.func @sparse_reorder_coo(%arg0 : tensor<?x?xf32, #COO>) -> tensor<?x?xf32, #COO> {
  %ret = sparse_tensor.reorder_coo quick_sort %arg0 : tensor<?x?xf32, #COO> to tensor<?x?xf32, #COO>
  return %ret : tensor<?x?xf32, #COO>
}

#BSR = #sparse_tensor.encoding<{
  map = ( i, j ) ->
  ( i floordiv 2 : dense,
    j floordiv 3 : compressed,
    i mod 2      : dense,
    j mod 3      : dense
  )
}>

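// A dim_to_lvl translation followed by the inverse lvl_to_dim translation
// under the same encoding folds to the original dimension coordinates.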
// CHECK-LABEL: func @sparse_crd_translate(
//   CHECK-NOT:   sparse_tensor.crd_translate
func.func @sparse_crd_translate(%arg0: index, %arg1: index) -> (index, index) {
  %l0, %l1, %l2, %l3 = sparse_tensor.crd_translate dim_to_lvl [%arg0, %arg1] as #BSR : index, index, index, index
  %d0, %d1 = sparse_tensor.crd_translate lvl_to_dim [%l0, %l1, %l2, %l3] as #BSR : index, index
  return %d0, %d1 : index, index
}

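// Under the #BSR map, level 0 is i floordiv 2; with a static dimension size
// of 10, the level query folds to the constant 5.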
// CHECK-LABEL:   func.func @sparse_lvl_0(
// CHECK:           %[[C5:.*]] = arith.constant 5 : index
// CHECK:           return %[[C5]] : index
func.func @sparse_lvl_0(%t : tensor<10x?xi32, #BSR>) -> index {
  %lvl = arith.constant 0 : index
  %l0 = sparse_tensor.lvl %t, %lvl : tensor<10x?xi32, #BSR>
  return %l0 : index
}

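// Level 3 is j mod 3, so its size is the block size 3 regardless of the
// dynamic dimension sizes, and the query folds to a constant.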
// CHECK-LABEL:   func.func @sparse_lvl_3(
// CHECK:           %[[C3:.*]] = arith.constant 3 : index
// CHECK:           return %[[C3]] : index
func.func @sparse_lvl_3(%t : tensor<?x?xi32, #BSR>) -> index {
  %lvl = arith.constant 3 : index
  %l0 = sparse_tensor.lvl %t, %lvl : tensor<?x?xi32, #BSR>
  return %l0 : index
}

#DSDD = #sparse_tensor.encoding<{
  map = (i, j, k, l) -> (i: dense, j: compressed, k: dense, l: dense)
}>

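// A reinterpret_map round trip (from #BSR to the equivalent four-level #DSDD
// type and back) folds to the original tensor.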
// CHECK-LABEL:   func.func @sparse_reinterpret_map(
// CHECK-NOT: sparse_tensor.reinterpret_map
func.func @sparse_reinterpret_map(%t0 : tensor<6x12xi32, #BSR>) -> tensor<6x12xi32, #BSR> {
  %t1 = sparse_tensor.reinterpret_map %t0 : tensor<6x12xi32, #BSR>
                                         to tensor<3x4x2x3xi32, #DSDD>
  %t2 = sparse_tensor.reinterpret_map %t1 : tensor<3x4x2x3xi32, #DSDD>
                                         to tensor<6x12xi32, #BSR>
  return %t2 : tensor<6x12xi32, #BSR>
}