Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/call-skip.ll')
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/call-skip.ll | 112 |
1 file changed, 97 insertions, 15 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/call-skip.ll b/llvm/test/CodeGen/AMDGPU/call-skip.ll
index ea2bba1..e2ca278 100644
--- a/llvm/test/CodeGen/AMDGPU/call-skip.ll
+++ b/llvm/test/CodeGen/AMDGPU/call-skip.ll
@@ -1,4 +1,6 @@
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -global-isel=0 -mcpu=hawaii < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SDAG %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -global-isel=1 -mcpu=hawaii < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GISEL %s
 
 ; A call should be skipped if all lanes are zero, since we don't know
 ; what side effects should be avoided inside the call.
@@ -6,12 +8,37 @@ define hidden void @func() #1 {
   ret void
 }
 
-; GCN-LABEL: {{^}}if_call:
-; GCN: s_and_saveexec_b64
-; GCN-NEXT: s_cbranch_execz [[END:.LBB[0-9]+_[0-9]+]]
-; GCN: s_swappc_b64
-; GCN: [[END]]:
 define void @if_call(i32 %flag) #0 {
+; GCN-LABEL: if_call:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_mov_b32 s20, s33
+; GCN-NEXT:    s_mov_b32 s33, s32
+; GCN-NEXT:    s_xor_saveexec_b64 s[16:17], -1
+; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s33 ; 4-byte Folded Spill
+; GCN-NEXT:    s_mov_b64 exec, s[16:17]
+; GCN-NEXT:    v_writelane_b32 v1, s30, 0
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT:    s_addk_i32 s32, 0x400
+; GCN-NEXT:    v_writelane_b32 v1, s31, 1
+; GCN-NEXT:    s_and_saveexec_b64 s[16:17], vcc
+; GCN-NEXT:    s_cbranch_execz .LBB1_2
+; GCN-NEXT:  ; %bb.1: ; %call
+; GCN-NEXT:    s_getpc_b64 s[18:19]
+; GCN-NEXT:    s_add_u32 s18, s18, func@rel32@lo+4
+; GCN-NEXT:    s_addc_u32 s19, s19, func@rel32@hi+12
+; GCN-NEXT:    s_swappc_b64 s[30:31], s[18:19]
+; GCN-NEXT:  .LBB1_2: ; %end
+; GCN-NEXT:    s_or_b64 exec, exec, s[16:17]
+; GCN-NEXT:    v_readlane_b32 s31, v1, 1
+; GCN-NEXT:    v_readlane_b32 s30, v1, 0
+; GCN-NEXT:    s_mov_b32 s32, s33
+; GCN-NEXT:    s_xor_saveexec_b64 s[4:5], -1
+; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s33 ; 4-byte Folded Reload
+; GCN-NEXT:    s_mov_b64 exec, s[4:5]
+; GCN-NEXT:    s_mov_b32 s33, s20
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cc = icmp eq i32 %flag, 0
   br i1 %cc, label %call, label %end
 
@@ -23,12 +50,20 @@ end:
   ret void
 }
 
-; GCN-LABEL: {{^}}if_asm:
-; GCN: s_and_saveexec_b64
-; GCN-NEXT: s_cbranch_execz [[END:.LBB[0-9]+_[0-9]+]]
-; GCN: ; sample asm
-; GCN: [[END]]:
 define void @if_asm(i32 %flag) #0 {
+; GCN-LABEL: if_asm:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-NEXT:    s_cbranch_execz .LBB2_2
+; GCN-NEXT:  ; %bb.1: ; %call
+; GCN-NEXT:    ;;#ASMSTART
+; GCN-NEXT:    ; sample asm
+; GCN-NEXT:    ;;#ASMEND
+; GCN-NEXT:  .LBB2_2: ; %end
+; GCN-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cc = icmp eq i32 %flag, 0
   br i1 %cc, label %call, label %end
 
@@ -40,11 +75,58 @@ end:
   ret void
 }
 
-; GCN-LABEL: {{^}}if_call_kernel:
-; GCN: s_and_saveexec_b64
-; GCN-NEXT: s_cbranch_execz .LBB3_2
-; GCN: s_swappc_b64
 define amdgpu_kernel void @if_call_kernel() #0 {
+; SDAG-LABEL: if_call_kernel:
+; SDAG:       ; %bb.0:
+; SDAG-NEXT:    s_add_i32 s12, s12, s17
+; SDAG-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
+; SDAG-NEXT:    s_add_u32 s0, s0, s17
+; SDAG-NEXT:    s_addc_u32 s1, s1, 0
+; SDAG-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; SDAG-NEXT:    s_mov_b32 s32, 0
+; SDAG-NEXT:    s_mov_b32 flat_scratch_lo, s13
+; SDAG-NEXT:    s_and_saveexec_b64 s[12:13], vcc
+; SDAG-NEXT:    s_cbranch_execz .LBB3_2
+; SDAG-NEXT:  ; %bb.1: ; %call
+; SDAG-NEXT:    v_lshlrev_b32_e32 v1, 10, v1
+; SDAG-NEXT:    v_lshlrev_b32_e32 v2, 20, v2
+; SDAG-NEXT:    v_or_b32_e32 v0, v0, v1
+; SDAG-NEXT:    s_getpc_b64 s[18:19]
+; SDAG-NEXT:    s_add_u32 s18, s18, func@rel32@lo+4
+; SDAG-NEXT:    s_addc_u32 s19, s19, func@rel32@hi+12
+; SDAG-NEXT:    v_or_b32_e32 v31, v0, v2
+; SDAG-NEXT:    s_mov_b32 s12, s14
+; SDAG-NEXT:    s_mov_b32 s13, s15
+; SDAG-NEXT:    s_mov_b32 s14, s16
+; SDAG-NEXT:    s_swappc_b64 s[30:31], s[18:19]
+; SDAG-NEXT:  .LBB3_2: ; %end
+; SDAG-NEXT:    s_endpgm
+;
+; GISEL-LABEL: if_call_kernel:
+; GISEL:       ; %bb.0:
+; GISEL-NEXT:    s_add_i32 s12, s12, s17
+; GISEL-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
+; GISEL-NEXT:    s_add_u32 s0, s0, s17
+; GISEL-NEXT:    s_addc_u32 s1, s1, 0
+; GISEL-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GISEL-NEXT:    s_mov_b32 s32, 0
+; GISEL-NEXT:    s_mov_b32 flat_scratch_lo, s13
+; GISEL-NEXT:    s_and_saveexec_b64 s[12:13], vcc
+; GISEL-NEXT:    s_cbranch_execz .LBB3_2
+; GISEL-NEXT:  ; %bb.1: ; %call
+; GISEL-NEXT:    v_lshlrev_b32_e32 v1, 10, v1
+; GISEL-NEXT:    v_or_b32_e32 v0, v0, v1
+; GISEL-NEXT:    v_lshlrev_b32_e32 v1, 20, v2
+; GISEL-NEXT:    s_getpc_b64 s[18:19]
+; GISEL-NEXT:    s_add_u32 s18, s18, func@rel32@lo+4
+; GISEL-NEXT:    s_addc_u32 s19, s19, func@rel32@hi+12
+; GISEL-NEXT:    v_or_b32_e32 v31, v0, v1
+; GISEL-NEXT:    s_mov_b32 s12, s14
+; GISEL-NEXT:    s_mov_b32 s13, s15
+; GISEL-NEXT:    s_mov_b32 s14, s16
+; GISEL-NEXT:    s_swappc_b64 s[30:31], s[18:19]
+; GISEL-NEXT:  .LBB3_2: ; %end
+; GISEL-NEXT:    s_endpgm
   %id = call i32 @llvm.amdgcn.workitem.id.x()
   %cc = icmp eq i32 %id, 0
   br i1 %cc, label %call, label %end
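
As the NOTE line in the new header says, these check blocks come from utils/update_llc_test_checks.py, so they are refreshed by rerunning the script rather than edited by hand. A minimal sketch of that workflow, assuming an LLVM checkout with llc already built under ./build (the build path is illustrative):

  # Regenerate the autogenerated FileCheck assertions in place.
  llvm/utils/update_llc_test_checks.py --llc-binary=./build/bin/llc \
      llvm/test/CodeGen/AMDGPU/call-skip.ll

  # Re-run the test to confirm both RUN lines (SDAG and GlobalISel) pass.
  ./build/bin/llvm-lit -v llvm/test/CodeGen/AMDGPU/call-skip.ll

Because the checks are now exact per-instruction assertions, the skip-the-call pattern the test guards (s_and_saveexec_b64 followed by s_cbranch_execz around the s_swappc_b64) is pinned down for both instruction selectors instead of being matched loosely by hand-written GCN lines.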
