aboutsummaryrefslogtreecommitdiff
path: root/llvm/test/CodeGen/AMDGPU/call-skip.ll
blob: e2ca278d687becaaab2052a7a22f0e51fce9cd08 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa -global-isel=0 -mcpu=hawaii < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SDAG %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -global-isel=1 -mcpu=hawaii < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GISEL %s

; A call should be skipped when the branch condition is false in all lanes
; (the exec mask becomes zero), since we cannot know which side effects
; inside the call would need to be avoided.
; Trivial empty callee. Attribute group #1 is nounwind noinline, so calls
; to it survive into codegen and the skip-the-call-on-execz codegen can be
; exercised by the callers below.
define hidden void @func() #1 {
  ret void
}

define void @if_call(i32 %flag) #0 {
; GCN-LABEL: if_call:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    s_mov_b32 s20, s33
; GCN-NEXT:    s_mov_b32 s33, s32
; GCN-NEXT:    s_xor_saveexec_b64 s[16:17], -1
; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s33 ; 4-byte Folded Spill
; GCN-NEXT:    s_mov_b64 exec, s[16:17]
; GCN-NEXT:    v_writelane_b32 v1, s30, 0
; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
; GCN-NEXT:    s_addk_i32 s32, 0x400
; GCN-NEXT:    v_writelane_b32 v1, s31, 1
; GCN-NEXT:    s_and_saveexec_b64 s[16:17], vcc
; GCN-NEXT:    s_cbranch_execz .LBB1_2
; GCN-NEXT:  ; %bb.1: ; %call
; GCN-NEXT:    s_getpc_b64 s[18:19]
; GCN-NEXT:    s_add_u32 s18, s18, func@rel32@lo+4
; GCN-NEXT:    s_addc_u32 s19, s19, func@rel32@hi+12
; GCN-NEXT:    s_swappc_b64 s[30:31], s[18:19]
; GCN-NEXT:  .LBB1_2: ; %end
; GCN-NEXT:    s_or_b64 exec, exec, s[16:17]
; GCN-NEXT:    v_readlane_b32 s31, v1, 1
; GCN-NEXT:    v_readlane_b32 s30, v1, 0
; GCN-NEXT:    s_mov_b32 s32, s33
; GCN-NEXT:    s_xor_saveexec_b64 s[4:5], -1
; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s33 ; 4-byte Folded Reload
; GCN-NEXT:    s_mov_b64 exec, s[4:5]
; GCN-NEXT:    s_mov_b32 s33, s20
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    s_setpc_b64 s[30:31]
  ; Potentially divergent condition: only lanes where %flag == 0 take %call.
  %cc = icmp eq i32 %flag, 0
  br i1 %cc, label %call, label %end

call:
  ; The checks above expect an s_cbranch_execz to jump over this call when
  ; no lanes are active (exec == 0), skipping the swappc entirely.
  call void @func()
  br label %end

end:
  ret void
}

define void @if_asm(i32 %flag) #0 {
; GCN-LABEL: if_asm:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
; GCN-NEXT:    s_and_saveexec_b64 s[4:5], vcc
; GCN-NEXT:    s_cbranch_execz .LBB2_2
; GCN-NEXT:  ; %bb.1: ; %call
; GCN-NEXT:    ;;#ASMSTART
; GCN-NEXT:    ; sample asm
; GCN-NEXT:    ;;#ASMEND
; GCN-NEXT:  .LBB2_2: ; %end
; GCN-NEXT:    s_or_b64 exec, exec, s[4:5]
; GCN-NEXT:    s_setpc_b64 s[30:31]
  ; Same divergent-branch shape as @if_call, but guarding a side-effecting
  ; inline asm block instead of a call; the execz skip is still expected.
  %cc = icmp eq i32 %flag, 0
  br i1 %cc, label %call, label %end

call:
  ; sideeffect keeps the empty-looking asm from being deleted.
  call void asm sideeffect "; sample asm", ""()
  br label %end

end:
  ret void
}

define amdgpu_kernel void @if_call_kernel() #0 {
; SDAG-LABEL: if_call_kernel:
; SDAG:       ; %bb.0:
; SDAG-NEXT:    s_add_i32 s12, s12, s17
; SDAG-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
; SDAG-NEXT:    s_add_u32 s0, s0, s17
; SDAG-NEXT:    s_addc_u32 s1, s1, 0
; SDAG-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
; SDAG-NEXT:    s_mov_b32 s32, 0
; SDAG-NEXT:    s_mov_b32 flat_scratch_lo, s13
; SDAG-NEXT:    s_and_saveexec_b64 s[12:13], vcc
; SDAG-NEXT:    s_cbranch_execz .LBB3_2
; SDAG-NEXT:  ; %bb.1: ; %call
; SDAG-NEXT:    v_lshlrev_b32_e32 v1, 10, v1
; SDAG-NEXT:    v_lshlrev_b32_e32 v2, 20, v2
; SDAG-NEXT:    v_or_b32_e32 v0, v0, v1
; SDAG-NEXT:    s_getpc_b64 s[18:19]
; SDAG-NEXT:    s_add_u32 s18, s18, func@rel32@lo+4
; SDAG-NEXT:    s_addc_u32 s19, s19, func@rel32@hi+12
; SDAG-NEXT:    v_or_b32_e32 v31, v0, v2
; SDAG-NEXT:    s_mov_b32 s12, s14
; SDAG-NEXT:    s_mov_b32 s13, s15
; SDAG-NEXT:    s_mov_b32 s14, s16
; SDAG-NEXT:    s_swappc_b64 s[30:31], s[18:19]
; SDAG-NEXT:  .LBB3_2: ; %end
; SDAG-NEXT:    s_endpgm
;
; GISEL-LABEL: if_call_kernel:
; GISEL:       ; %bb.0:
; GISEL-NEXT:    s_add_i32 s12, s12, s17
; GISEL-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
; GISEL-NEXT:    s_add_u32 s0, s0, s17
; GISEL-NEXT:    s_addc_u32 s1, s1, 0
; GISEL-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
; GISEL-NEXT:    s_mov_b32 s32, 0
; GISEL-NEXT:    s_mov_b32 flat_scratch_lo, s13
; GISEL-NEXT:    s_and_saveexec_b64 s[12:13], vcc
; GISEL-NEXT:    s_cbranch_execz .LBB3_2
; GISEL-NEXT:  ; %bb.1: ; %call
; GISEL-NEXT:    v_lshlrev_b32_e32 v1, 10, v1
; GISEL-NEXT:    v_or_b32_e32 v0, v0, v1
; GISEL-NEXT:    v_lshlrev_b32_e32 v1, 20, v2
; GISEL-NEXT:    s_getpc_b64 s[18:19]
; GISEL-NEXT:    s_add_u32 s18, s18, func@rel32@lo+4
; GISEL-NEXT:    s_addc_u32 s19, s19, func@rel32@hi+12
; GISEL-NEXT:    v_or_b32_e32 v31, v0, v1
; GISEL-NEXT:    s_mov_b32 s12, s14
; GISEL-NEXT:    s_mov_b32 s13, s15
; GISEL-NEXT:    s_mov_b32 s14, s16
; GISEL-NEXT:    s_swappc_b64 s[30:31], s[18:19]
; GISEL-NEXT:  .LBB3_2: ; %end
; GISEL-NEXT:    s_endpgm
  ; Kernel variant: branch on workitem.id.x == 0, so the call region is
  ; divergent across the workgroup. Checked separately per selector (SDAG
  ; vs GISEL) because the two differ in v31 packing order above.
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %cc = icmp eq i32 %id, 0
  br i1 %cc, label %call, label %end

call:
  ; Expected to be guarded by s_cbranch_execz, as in @if_call.
  call void @func()
  br label %end

end:
  ret void
}

; Workitem-id intrinsic used by @if_call_kernel to form a divergent condition.
declare i32 @llvm.amdgcn.workitem.id.x() #2

attributes #0 = { nounwind }
attributes #1 = { nounwind noinline }  ; noinline keeps calls to @func in codegen
attributes #2 = { nounwind readnone speculatable }