; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; XUN: llc -mtriple=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,VI %s

; FIXME: Broken on VI because flat instructions need to be emitted
; instead of the addr64 equivalents of the _OFFSET variants.

; Check that moving the pointer out of the resource descriptor to
; vaddr works for atomics.
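;
; Because %ptr below is loaded per-lane, it is divergent and cannot live in
; the SGPR resource descriptor. The expected lowering (sketched here from the
; GCN checks below; the exact register numbers are illustrative) instead
; zeroes the 64-bit base of the descriptor and routes the pointer through
; vaddr using the addr64 addressing mode:
;
;   s_mov_b32 s8, 0          ; descriptor base lo = 0
;   s_mov_b32 s9, 0          ; descriptor base hi = 0
;   buffer_load_dword v4, v[1:2], s[8:11], 0 addr64 offset:400
;   ...
;   buffer_atomic_cmpswap v[5:6], v[1:2], s[8:11], 0 addr64 offset:400 glc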

declare i32 @llvm.amdgcn.workitem.id.x() #1

define amdgpu_kernel void @atomic_max_i32(ptr addrspace(1) %out, ptr addrspace(1) %in, ptr addrspace(1) %x, i32 %y) #0 {
; GCN-LABEL: atomic_max_i32:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-NEXT:    s_mov_b32 s11, 0xf000
; GCN-NEXT:    s_mov_b32 s10, 0
; GCN-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
; GCN-NEXT:    v_mov_b32_e32 v2, 0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_mov_b64 s[8:9], s[2:3]
; GCN-NEXT:    buffer_load_dwordx2 v[1:2], v[1:2], s[8:11], 0 addr64 glc
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 1, v0
; GCN-NEXT:    s_and_saveexec_b64 s[2:3], vcc
; GCN-NEXT:    s_cbranch_execz .LBB0_4
; GCN-NEXT:  ; %bb.1: ; %atomic
; GCN-NEXT:    s_mov_b32 s8, s10
; GCN-NEXT:    s_mov_b32 s9, s10
; GCN-NEXT:    buffer_load_dword v4, v[1:2], s[8:11], 0 addr64 offset:400
; GCN-NEXT:    s_load_dword s4, s[4:5], 0xf
; GCN-NEXT:    s_mov_b64 s[2:3], 0
; GCN-NEXT:  .LBB0_2: ; %atomicrmw.start
; GCN-NEXT:    ; =>This Inner Loop Header: Depth=1
; GCN-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_max_i32_e32 v3, s4, v4
; GCN-NEXT:    s_waitcnt expcnt(0)
; GCN-NEXT:    v_mov_b32_e32 v6, v4
; GCN-NEXT:    v_mov_b32_e32 v5, v3
; GCN-NEXT:    buffer_atomic_cmpswap v[5:6], v[1:2], s[8:11], 0 addr64 offset:400 glc
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    buffer_wbinvl1
; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, v5, v4
; GCN-NEXT:    s_or_b64 s[2:3], vcc, s[2:3]
; GCN-NEXT:    v_mov_b32_e32 v4, v5
; GCN-NEXT:    s_andn2_b64 exec, exec, s[2:3]
; GCN-NEXT:    s_cbranch_execnz .LBB0_2
; GCN-NEXT:  ; %bb.3: ; %atomicrmw.end
; GCN-NEXT:    s_or_b64 exec, exec, s[2:3]
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_mov_b32 s2, -1
; GCN-NEXT:    buffer_store_dword v5, off, s[0:3], 0
; GCN-NEXT:  .LBB0_4: ; %exit
; GCN-NEXT:    s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.gep = getelementptr ptr addrspace(1), ptr addrspace(1) %in, i32 %tid
  %ptr = load volatile ptr addrspace(1), ptr addrspace(1) %tid.gep
  %xor = xor i32 %tid, 1
  %cmp = icmp ne i32 %xor, 0
  br i1 %cmp, label %atomic, label %exit

atomic:
  %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 100
  %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y seq_cst
  store i32 %ret, ptr addrspace(1) %out
  br label %exit

exit:
  ret void
}
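
; Same as @atomic_max_i32, but the atomic result is unused: the CAS loop is
; still expected, while the final buffer_store_dword of the old value is not.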

define amdgpu_kernel void @atomic_max_i32_noret(ptr addrspace(1) %out, ptr addrspace(1) %in, ptr addrspace(1) %x, i32 %y) #0 {
; GCN-LABEL: atomic_max_i32_noret:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0xb
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_mov_b32 s2, 0
; GCN-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
; GCN-NEXT:    v_mov_b32_e32 v2, 0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    buffer_load_dwordx2 v[1:2], v[1:2], s[0:3], 0 addr64 glc
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 1, v0
; GCN-NEXT:    s_and_saveexec_b64 s[0:1], vcc
; GCN-NEXT:    s_cbranch_execz .LBB1_3
; GCN-NEXT:  ; %bb.1: ; %atomic
; GCN-NEXT:    s_mov_b32 s0, s2
; GCN-NEXT:    s_mov_b32 s1, s2
; GCN-NEXT:    buffer_load_dword v4, v[1:2], s[0:3], 0 addr64 offset:400
; GCN-NEXT:    s_load_dword s6, s[4:5], 0xf
; GCN-NEXT:    s_mov_b64 s[4:5], 0
; GCN-NEXT:  .LBB1_2: ; %atomicrmw.start
; GCN-NEXT:    ; =>This Inner Loop Header: Depth=1
; GCN-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_max_i32_e32 v3, s6, v4
; GCN-NEXT:    s_waitcnt expcnt(0)
; GCN-NEXT:    v_mov_b32_e32 v6, v4
; GCN-NEXT:    v_mov_b32_e32 v5, v3
; GCN-NEXT:    buffer_atomic_cmpswap v[5:6], v[1:2], s[0:3], 0 addr64 offset:400 glc
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    buffer_wbinvl1
; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, v5, v4
; GCN-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
; GCN-NEXT:    v_mov_b32_e32 v4, v5
; GCN-NEXT:    s_andn2_b64 exec, exec, s[4:5]
; GCN-NEXT:    s_cbranch_execnz .LBB1_2
; GCN-NEXT:  .LBB1_3: ; %exit
; GCN-NEXT:    s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.gep = getelementptr ptr addrspace(1), ptr addrspace(1) %in, i32 %tid
  %ptr = load volatile ptr addrspace(1), ptr addrspace(1) %tid.gep
  %xor = xor i32 %tid, 1
  %cmp = icmp ne i32 %xor, 0
  br i1 %cmp, label %atomic, label %exit

atomic:
  %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 100
  %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y seq_cst
  br label %exit

exit:
  ret void
}
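
; The atomicrmw.start / atomicrmw.end blocks in the checks above come from
; AtomicExpand lowering the system-scope atomicrmw max into a cmpxchg loop.
; Roughly (a hedged sketch of the expanded IR, not part of the test input):
;
;   atomic:
;     %init = load i32, ptr addrspace(1) %gep
;     br label %atomicrmw.start
;
;   atomicrmw.start:
;     %loaded = phi i32 [ %init, %atomic ], [ %newloaded, %atomicrmw.start ]
;     %cmp = icmp sgt i32 %loaded, %y
;     %new = select i1 %cmp, i32 %loaded, i32 %y
;     %pair = cmpxchg ptr addrspace(1) %gep, i32 %loaded, i32 %new seq_cst seq_cst
;     %newloaded = extractvalue { i32, i1 } %pair, 0
;     %success = extractvalue { i32, i1 } %pair, 1
;     br i1 %success, label %atomicrmw.end, label %atomicrmw.start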

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }