// RUN: mlir-opt --amdgpu-fold-memrefs-ops --split-input-file %s | FileCheck %s
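// The --amdgpu-fold-memrefs-ops pass folds memref view-like producers
// (subview, expand_shape, collapse_shape) of the source operand into the
// source indices of amdgpu.gather_to_lds; each test below covers one case.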

#gpu_lds_addrspace = 3
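
// A subview with zero offsets folds away entirely: the op reads from the
// base allocation with the original indices.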

// CHECK: func @test_subview_folding
// CHECK-SAME: %[[ARG0:.*]]: index, %[[ARG1:.*]]: index
func.func @test_subview_folding(%offset_i: index, %offset_j: index) {
  // CHECK: %[[LOCAL:.*]] = memref.alloc() : memref<64x64xf16, 3>
  // CHECK: %[[MEM:.*]] = memref.alloc() : memref<64x128xf16>
  // CHECK: %[[C0:.*]] = arith.constant 0 : index
  // CHECK: amdgpu.gather_to_lds %[[MEM]][%[[ARG0]], %[[ARG1]]], %[[LOCAL]][%[[C0]], %[[C0]]]
  // CHECK-SAME: vector<8xf16>, memref<64x128xf16>, memref<64x64xf16, 3>

  %alloc = memref.alloc() : memref<64x64xf16, #gpu_lds_addrspace>
  %mem = memref.alloc() : memref<64x128xf16>
  %subview = memref.subview %mem[0, 0][32, 64][1, 1] : memref<64x128xf16> to memref<32x64xf16, strided<[128, 1]>>
  %c0 = arith.constant 0 : index
  amdgpu.gather_to_lds %subview[%offset_i, %offset_j], %alloc[%c0, %c0]
    : vector<8xf16>, memref<32x64xf16, strided<[128, 1]>>, memref<64x64xf16, #gpu_lds_addrspace>
  func.return
}

// -----

#gpu_lds_addrspace = 3
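
// A subview with static offsets [32, 64] folds into the indices through
// affine.apply maps that add the offsets to the dynamic indices.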

// CHECK: #[[MAP:.*]] = affine_map<()[s0] -> (s0 + 32)>
// CHECK: #[[MAP1:.*]] = affine_map<()[s0] -> (s0 + 64)>

// CHECK: func @subview_folding_offset
// CHECK-SAME: %[[ARG0:.*]]: index, %[[ARG1:.*]]: index
func.func @subview_folding_offset(%offset_i: index, %offset_j: index) {
  // CHECK: %[[LOCAL:.*]] = memref.alloc() : memref<64x64xf16, 3>
  // CHECK: %[[MEM:.*]] = memref.alloc() : memref<64x128xf16>
  // CHECK: %[[C0:.*]] = arith.constant 0 : index
  // CHECK: %[[IDX0:.*]] = affine.apply #[[MAP]]()[%[[ARG0]]]
  // CHECK: %[[IDX1:.*]] = affine.apply #[[MAP1]]()[%[[ARG1]]]
  // CHECK: amdgpu.gather_to_lds %[[MEM]][%[[IDX0]], %[[IDX1]]], %[[LOCAL]][%[[C0]], %[[C0]]]
  // CHECK-SAME: vector<8xf16>, memref<64x128xf16>, memref<64x64xf16, 3>

  %alloc = memref.alloc() : memref<64x64xf16, #gpu_lds_addrspace>
  %mem = memref.alloc() : memref<64x128xf16>
  %subview = memref.subview %mem[32, 64][32, 64][1, 1] : memref<64x128xf16> to memref<32x64xf16, strided<[128, 1], offset: 4160>>
  %c0 = arith.constant 0 : index
  amdgpu.gather_to_lds %subview[%offset_i, %offset_j], %alloc[%c0, %c0]
    : vector<8xf16>, memref<32x64xf16, strided<[128, 1], offset: 4160>>, memref<64x64xf16, #gpu_lds_addrspace>
  func.return
}

// -----

#gpu_lds_addrspace = 3
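
// An expand_shape folds by linearizing the 2-D indices back into the flat
// source memref with affine.linearize_index.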

// CHECK: func @test_expand_shape
// CHECK-SAME: %[[ARG0:.*]]: index, %[[ARG1:.*]]: index
func.func @test_expand_shape(%offset_i: index, %offset_j: index) {
  // CHECK: %[[LOCAL:.*]] = memref.alloc() : memref<64x64xf16, 3>
  // CHECK: %[[MEM:.*]] = memref.alloc() : memref<8192xf16>
  // CHECK: %[[C0:.*]] = arith.constant 0 : index
  // CHECK: %[[IDX:.*]] = affine.linearize_index [%[[ARG0]], %[[ARG1]]] by (64, 128) : index
  // CHECK: amdgpu.gather_to_lds %[[MEM]][%[[IDX]]], %[[LOCAL]][%[[C0]], %[[C0]]]
  // CHECK-SAME: vector<8xf16>, memref<8192xf16>, memref<64x64xf16, 3>

  %alloc = memref.alloc() : memref<64x64xf16, #gpu_lds_addrspace>
  %mem = memref.alloc() : memref<8192xf16>
  %expand = memref.expand_shape %mem [[0, 1]] output_shape [64, 128] : memref<8192xf16> into memref<64x128xf16>
  %c0 = arith.constant 0 : index
  amdgpu.gather_to_lds %expand[%offset_i, %offset_j], %alloc[%c0, %c0]
    : vector<8xf16>, memref<64x128xf16>, memref<64x64xf16, #gpu_lds_addrspace>
  func.return
}

// -----

#gpu_lds_addrspace = 3
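
// A collapse_shape folds by delinearizing the 1-D index into the original
// 2-D shape with affine.delinearize_index.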

// CHECK: func @test_collapse_shape
// CHECK-SAME: %[[ARG0:.*]]: index, %[[ARG1:.*]]: index
func.func @test_collapse_shape(%offset_i: index, %offset_j: index) {
  // CHECK: %[[LOCAL:.*]] = memref.alloc() : memref<64x64xf16, 3>
  // CHECK: %[[MEM:.*]] = memref.alloc() : memref<64x128xf16>
  // CHECK: %[[C0:.*]] = arith.constant 0 : index
  // CHECK: %[[INDICES:.*]]:2 = affine.delinearize_index %[[ARG0]] into (64, 128) : index, index
  // CHECK: amdgpu.gather_to_lds %[[MEM]][%[[INDICES]]#0, %[[INDICES]]#1], %[[LOCAL]][%[[C0]], %[[C0]]]
  // CHECK-SAME: vector<8xf16>, memref<64x128xf16>, memref<64x64xf16, 3>

  %alloc = memref.alloc() : memref<64x64xf16, #gpu_lds_addrspace>
  %mem = memref.alloc() : memref<64x128xf16>
  %collapse = memref.collapse_shape %mem [[0, 1]] : memref<64x128xf16> into memref<8192xf16>
  %c0 = arith.constant 0 : index
  amdgpu.gather_to_lds %collapse[%offset_i], %alloc[%c0, %c0]
    : vector<8xf16>, memref<8192xf16>, memref<64x64xf16, #gpu_lds_addrspace>
  func.return
}