; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -O3 --amdgpu-lower-module-lds-strategy=module < %s | FileCheck -check-prefix=GCN %s
; RUN: opt -S -mtriple=amdgcn-- -amdgpu-lower-module-lds --amdgpu-lower-module-lds-strategy=module < %s | FileCheck %s
; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds --amdgpu-lower-module-lds-strategy=module < %s | FileCheck %s
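;
; Purpose (descriptive comment, inferred from the checks below): verify that AA
; metadata (!tbaa, !alias.scope, !noalias) on LDS loads and stores is preserved
; and remapped when --amdgpu-lower-module-lds packs the module-scope LDS
; variables into a per-kernel struct. The llc run additionally checks the DS
; instruction schedule produced around the sched_group_barrier intrinsics.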
@a = internal unnamed_addr addrspace(3) global [64 x i32] poison, align 4
@b = internal unnamed_addr addrspace(3) global [64 x i32] poison, align 4
@c = internal unnamed_addr addrspace(3) global [64 x i32] poison, align 4
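; The three module-scope LDS arrays above are replaced by fields of
; @llvm.amdgcn.kernel.ds_load_stores_aainfo.lds in the lowered IR (see the
; CHECK lines below).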
define amdgpu_kernel void @ds_load_stores_aainfo(ptr addrspace(1) %arg, i32 %i) {
; GCN-LABEL: ds_load_stores_aainfo:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dword s0, s[4:5], 0x2c
; GCN-NEXT: v_mov_b32_e32 v0, 1
; GCN-NEXT: v_mov_b32_e32 v1, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_lshl_b32 s0, s0, 2
; GCN-NEXT: v_mov_b32_e32 v4, s0
; GCN-NEXT: ds_read2_b32 v[2:3], v4 offset1:1
; GCN-NEXT: ds_write_b64 v1, v[0:1] offset:512
; GCN-NEXT: ds_read2_b32 v[4:5], v4 offset0:64 offset1:65
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
; GCN-NEXT: ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_add_co_u32_e32 v2, vcc, v2, v4
; GCN-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v5, vcc
; GCN-NEXT: global_store_dwordx2 v1, v[2:3], s[0:1]
; GCN-NEXT: s_endpgm
; CHECK-LABEL: define amdgpu_kernel void @ds_load_stores_aainfo(
; CHECK-SAME: ptr addrspace(1) [[ARG:%.*]], i32 [[I:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[BB:.*:]]
; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds [64 x i32], ptr addrspace(3) @llvm.amdgcn.kernel.ds_load_stores_aainfo.lds, i32 0, i32 [[I]]
; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds [64 x i32], ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_KERNEL_DS_LOAD_STORES_AAINFO_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.kernel.ds_load_stores_aainfo.lds, i32 0, i32 1), i32 0, i32 [[I]]
; CHECK-NEXT: [[VAL_A:%.*]] = load i64, ptr addrspace(3) [[GEP_A]], align 4, !tbaa [[TBAA1:![0-9]+]], !alias.scope [[META4:![0-9]+]], !noalias [[META7:![0-9]+]]
; CHECK-NEXT: [[VAL_B:%.*]] = load i64, ptr addrspace(3) [[GEP_B]], align 4, !tbaa [[TBAA1]], !alias.scope [[META12:![0-9]+]], !noalias [[META13:![0-9]+]]
; CHECK-NEXT: store i64 1, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_KERNEL_DS_LOAD_STORES_AAINFO_LDS_T]], ptr addrspace(3) @llvm.amdgcn.kernel.ds_load_stores_aainfo.lds, i32 0, i32 2), align 16, !tbaa [[TBAA1]], !alias.scope [[META14:![0-9]+]], !noalias [[META15:![0-9]+]]
; CHECK-NEXT: [[VAL:%.*]] = add i64 [[VAL_A]], [[VAL_B]]
; CHECK-NEXT: store i64 [[VAL]], ptr addrspace(1) [[ARG]], align 4
; CHECK-NEXT: tail call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 1, i32 0)
; CHECK-NEXT: tail call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 1, i32 0)
; CHECK-NEXT: tail call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 1, i32 0)
; CHECK-NEXT: ret void
;
bb:
%gep.a = getelementptr inbounds [64 x i32], ptr addrspace(3) @a, i32 0, i32 %i
%gep.b = getelementptr inbounds [64 x i32], ptr addrspace(3) @b, i32 0, i32 %i
%val.a = load i64, ptr addrspace(3) %gep.a, align 4, !tbaa !0, !noalias !5
%val.b = load i64, ptr addrspace(3) %gep.b, align 4, !tbaa !0, !noalias !5
store i64 1, ptr addrspace(3) @c, align 4, !tbaa !0, !noalias !2
%val = add i64 %val.a, %val.b
store i64 %val, ptr addrspace(1) %arg, align 4
tail call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 1, i32 0)
tail call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 1, i32 0)
tail call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 1, i32 0)
ret void
}
!0 = !{!"omnipotent char", !1, i64 0}
!1 = !{!1}
!2 = !{!3}
!3 = distinct !{!3, !4}
!4 = distinct !{!4}
!5 = !{!3}
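; Hand-written AA metadata (as read from the input above): !0 is the TBAA access
; tag used on the LDS accesses, !1 a self-referential TBAA root, !3 an alias
; scope in domain !4, and !2/!5 the scope lists referenced by the !noalias
; annotations.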
;.
; CHECK: [[TBAA1]] = !{[[META2:![0-9]+]], [[META2]], i64 0, i64 0}
; CHECK: [[META2]] = !{!"omnipotent char", [[META3:![0-9]+]]}
; CHECK: [[META3]] = distinct !{[[META3]]}
; CHECK: [[META4]] = !{[[META5:![0-9]+]]}
; CHECK: [[META5]] = distinct !{[[META5]], [[META6:![0-9]+]]}
; CHECK: [[META6]] = distinct !{[[META6]]}
; CHECK: [[META7]] = !{[[META8:![0-9]+]], [[META10:![0-9]+]], [[META11:![0-9]+]]}
; CHECK: [[META8]] = distinct !{[[META8]], [[META9:![0-9]+]]}
; CHECK: [[META9]] = distinct !{[[META9]]}
; CHECK: [[META10]] = distinct !{[[META10]], [[META6]]}
; CHECK: [[META11]] = distinct !{[[META11]], [[META6]]}
; CHECK: [[META12]] = !{[[META10]]}
; CHECK: [[META13]] = !{[[META8]], [[META5]], [[META11]]}
; CHECK: [[META14]] = !{[[META11]]}
; CHECK: [[META15]] = !{[[META8]], [[META5]], [[META10]]}
;.