Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/infinite-loop.ll')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/infinite-loop.ll | 257
1 file changed, 236 insertions(+), 21 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/infinite-loop.ll b/llvm/test/CodeGen/AMDGPU/infinite-loop.ll
index 3e2e43f..df63592 100644
--- a/llvm/test/CodeGen/AMDGPU/infinite-loop.ll
+++ b/llvm/test/CodeGen/AMDGPU/infinite-loop.ll
@@ -36,26 +36,60 @@ loop:
   br label %loop
 }
 
+define amdgpu_kernel void @infinite_loop_callbr(ptr addrspace(1) %out) {
+; SI-LABEL: infinite_loop_callbr:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
+; SI-NEXT:    ;;#ASMSTART
+; SI-NEXT:    ;;#ASMEND
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    v_mov_b32_e32 v0, 0x3e7
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+; IR-LABEL: @infinite_loop_callbr(
+; IR-NEXT:  entry:
+; IR-NEXT:    callbr void asm "", ""()
+; IR-NEXT:            to label [[LOOP:%.*]] []
+; IR:       loop:
+; IR-NEXT:    store volatile i32 999, ptr addrspace(1) [[OUT:%.*]], align 4
+; IR-NEXT:    br i1 true, label [[TRANSITIONBLOCK:%.*]], label [[DUMMYRETURNBLOCK:%.*]]
+; IR:       TransitionBlock:
+; IR-NEXT:    callbr void asm "", ""()
+; IR-NEXT:            to label [[LOOP]] []
+; IR:       DummyReturnBlock:
+; IR-NEXT:    ret void
+;
+entry:
+  callbr void asm "", ""() to label %loop []
+
+loop:
+  store volatile i32 999, ptr addrspace(1) %out, align 4
+  callbr void asm "", ""() to label %loop []
+}
+
 define amdgpu_kernel void @infinite_loop_ret(ptr addrspace(1) %out) {
 ; SI-LABEL: infinite_loop_ret:
 ; SI:       ; %bb.0: ; %entry
 ; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
 ; SI-NEXT:    s_and_saveexec_b64 s[0:1], vcc
-; SI-NEXT:    s_cbranch_execz .LBB1_3
+; SI-NEXT:    s_cbranch_execz .LBB2_3
 ; SI-NEXT:  ; %bb.1: ; %loop.preheader
 ; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    v_mov_b32_e32 v0, 0x3e7
 ; SI-NEXT:    s_and_b64 vcc, exec, -1
-; SI-NEXT:  .LBB1_2: ; %loop
+; SI-NEXT:  .LBB2_2: ; %loop
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    s_mov_b64 vcc, vcc
-; SI-NEXT:    s_cbranch_vccnz .LBB1_2
-; SI-NEXT:  .LBB1_3: ; %UnifiedReturnBlock
+; SI-NEXT:    s_cbranch_vccnz .LBB2_2
+; SI-NEXT:  .LBB2_3: ; %UnifiedReturnBlock
 ; SI-NEXT:    s_endpgm
 ; IR-LABEL: @infinite_loop_ret(
 ; IR-NEXT:  entry:
@@ -81,44 +115,93 @@ return:
   ret void
 }
 
+define amdgpu_kernel void @infinite_loop_ret_callbr(ptr addrspace(1) %out) {
+; SI-LABEL: infinite_loop_ret_callbr:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; SI-NEXT:    ;;#ASMSTART
+; SI-NEXT:    ;;#ASMEND
+; SI-NEXT:  ; %bb.1: ; %loop.preheader
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    v_mov_b32_e32 v0, 0x3e7
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:  .LBB3_2: ; Inline asm indirect target
+; SI-NEXT:    ; %UnifiedReturnBlock
+; SI-NEXT:    ; Label of block must be emitted
+; SI-NEXT:    s_endpgm
+; IR-LABEL: @infinite_loop_ret_callbr(
+; IR-NEXT:  entry:
+; IR-NEXT:    [[TMP:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; IR-NEXT:    [[COND:%.*]] = icmp eq i32 [[TMP]], 1
+; IR-NEXT:    [[COND32:%.*]] = zext i1 [[COND]] to i32
+; IR-NEXT:    callbr void asm "", "r,!i"(i32 [[COND32]])
+; IR-NEXT:            to label [[LOOP:%.*]] [label %UnifiedReturnBlock]
+; IR:       loop:
+; IR-NEXT:    store volatile i32 999, ptr addrspace(1) [[OUT:%.*]], align 4
+; IR-NEXT:    br i1 true, label [[TRANSITIONBLOCK:%.*]], label [[UNIFIEDRETURNBLOCK:%.*]]
+; IR:       TransitionBlock:
+; IR-NEXT:    callbr void asm "", ""()
+; IR-NEXT:            to label [[LOOP]] []
+; IR:       UnifiedReturnBlock:
+; IR-NEXT:    ret void
+;
+entry:
+  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %cond = icmp eq i32 %tmp, 1
+  %cond32 = zext i1 %cond to i32
+  callbr void asm "", "r,!i"(i32 %cond32) to label %loop [label %return]
+
+loop:
+  store volatile i32 999, ptr addrspace(1) %out, align 4
+  callbr void asm "", ""() to label %loop []
+
+return:
+  ret void
+}
+
 define amdgpu_kernel void @infinite_loops(ptr addrspace(1) %out) {
 ; SI-LABEL: infinite_loops:
 ; SI:       ; %bb.0: ; %entry
 ; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
 ; SI-NEXT:    s_mov_b64 s[2:3], -1
-; SI-NEXT:    s_cbranch_scc1 .LBB2_4
+; SI-NEXT:    s_cbranch_scc1 .LBB4_4
 ; SI-NEXT:  ; %bb.1:
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    v_mov_b32_e32 v0, 0x378
 ; SI-NEXT:    s_and_b64 vcc, exec, -1
-; SI-NEXT:  .LBB2_2: ; %loop2
+; SI-NEXT:  .LBB4_2: ; %loop2
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    s_mov_b64 vcc, vcc
-; SI-NEXT:    s_cbranch_vccnz .LBB2_2
+; SI-NEXT:    s_cbranch_vccnz .LBB4_2
 ; SI-NEXT:  ; %bb.3: ; %Flow
 ; SI-NEXT:    s_mov_b64 s[2:3], 0
-; SI-NEXT:  .LBB2_4: ; %Flow2
+; SI-NEXT:  .LBB4_4: ; %Flow2
 ; SI-NEXT:    s_and_b64 vcc, exec, s[2:3]
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_mov_b64 vcc, vcc
-; SI-NEXT:    s_cbranch_vccz .LBB2_7
+; SI-NEXT:    s_cbranch_vccz .LBB4_7
 ; SI-NEXT:  ; %bb.5:
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_mov_b32_e32 v0, 0x3e7
 ; SI-NEXT:    s_and_b64 vcc, exec, 0
-; SI-NEXT:  .LBB2_6: ; %loop1
+; SI-NEXT:  .LBB4_6: ; %loop1
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    s_mov_b64 vcc, vcc
-; SI-NEXT:    s_cbranch_vccz .LBB2_6
-; SI-NEXT:  .LBB2_7: ; %DummyReturnBlock
+; SI-NEXT:    s_cbranch_vccz .LBB4_6
+; SI-NEXT:  .LBB4_7: ; %DummyReturnBlock
 ; SI-NEXT:    s_endpgm
 ; IR-LABEL: @infinite_loops(
 ; IR-NEXT:  entry:
@@ -144,24 +227,78 @@ loop2:
   br label %loop2
 }
 
+define amdgpu_kernel void @infinite_loops_callbr(ptr addrspace(1) %out) {
+; SI-LABEL: infinite_loops_callbr:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    ;;#ASMSTART
+; SI-NEXT:    ;;#ASMEND
+; SI-NEXT:  ; %bb.1: ; %loop1
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    v_mov_b32_e32 v0, 0x3e7
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+; SI-NEXT:  .LBB5_2: ; Inline asm indirect target
+; SI-NEXT:    ; %loop2.preheader
+; SI-NEXT:    ; Label of block must be emitted
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    v_mov_b32_e32 v0, 0x378
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+; IR-LABEL: @infinite_loops_callbr(
+; IR-NEXT:  entry:
+; IR-NEXT:    callbr void asm "", "r,!i"(i32 poison)
+; IR-NEXT:            to label [[LOOP1:%.*]] [label %loop2]
+; IR:       loop1:
+; IR-NEXT:    store volatile i32 999, ptr addrspace(1) [[OUT:%.*]], align 4
+; IR-NEXT:    br i1 true, label [[TRANSITIONBLOCK:%.*]], label [[DUMMYRETURNBLOCK:%.*]]
+; IR:       TransitionBlock:
+; IR-NEXT:    callbr void asm "", ""()
+; IR-NEXT:            to label [[LOOP1]] []
+; IR:       loop2:
+; IR-NEXT:    store volatile i32 888, ptr addrspace(1) [[OUT]], align 4
+; IR-NEXT:    br i1 true, label [[TRANSITIONBLOCK1:%.*]], label [[DUMMYRETURNBLOCK]]
+; IR:       TransitionBlock1:
+; IR-NEXT:    callbr void asm "", ""()
+; IR-NEXT:            to label [[LOOP2:%.*]] []
+; IR:       DummyReturnBlock:
+; IR-NEXT:    ret void
+;
+entry:
+  callbr void asm "", "r,!i"(i32 poison) to label %loop1 [label %loop2]
+
+loop1:
+  store volatile i32 999, ptr addrspace(1) %out, align 4
+  callbr void asm "", ""() to label %loop1 []
+
+loop2:
+  store volatile i32 888, ptr addrspace(1) %out, align 4
+  callbr void asm "", ""() to label %loop2 []
+}
+
 define amdgpu_kernel void @infinite_loop_nest_ret(ptr addrspace(1) %out) {
 ; SI-LABEL: infinite_loop_nest_ret:
 ; SI:       ; %bb.0: ; %entry
 ; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 1, v0
 ; SI-NEXT:    s_and_saveexec_b64 s[0:1], vcc
-; SI-NEXT:    s_cbranch_execz .LBB3_5
+; SI-NEXT:    s_cbranch_execz .LBB6_5
 ; SI-NEXT:  ; %bb.1: ; %outer_loop.preheader
 ; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x9
 ; SI-NEXT:    v_cmp_ne_u32_e64 s[0:1], 3, v0
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    v_mov_b32_e32 v0, 0x3e7
-; SI-NEXT:  .LBB3_2: ; %outer_loop
+; SI-NEXT:  .LBB6_2: ; %outer_loop
 ; SI-NEXT:    ; =>This Loop Header: Depth=1
-; SI-NEXT:    ; Child Loop BB3_3 Depth 2
+; SI-NEXT:    ; Child Loop BB6_3 Depth 2
 ; SI-NEXT:    s_mov_b64 s[2:3], 0
-; SI-NEXT:  .LBB3_3: ; %inner_loop
-; SI-NEXT:    ; Parent Loop BB3_2 Depth=1
+; SI-NEXT:  .LBB6_3: ; %inner_loop
+; SI-NEXT:    ; Parent Loop BB6_2 Depth=1
 ; SI-NEXT:    ; => This Inner Loop Header: Depth=2
 ; SI-NEXT:    s_and_b64 s[8:9], exec, s[0:1]
 ; SI-NEXT:    s_or_b64 s[2:3], s[8:9], s[2:3]
@@ -169,13 +306,13 @@ define amdgpu_kernel void @infinite_loop_nest_ret(ptr addrspace(1) %out) {
 ; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[2:3]
-; SI-NEXT:    s_cbranch_execnz .LBB3_3
+; SI-NEXT:    s_cbranch_execnz .LBB6_3
 ; SI-NEXT:  ; %bb.4: ; %loop.exit.guard
-; SI-NEXT:    ; in Loop: Header=BB3_2 Depth=1
+; SI-NEXT:    ; in Loop: Header=BB6_2 Depth=1
 ; SI-NEXT:    s_or_b64 exec, exec, s[2:3]
 ; SI-NEXT:    s_mov_b64 vcc, 0
-; SI-NEXT:    s_branch .LBB3_2
-; SI-NEXT:  .LBB3_5: ; %UnifiedReturnBlock
+; SI-NEXT:    s_branch .LBB6_2
+; SI-NEXT:  .LBB6_5: ; %UnifiedReturnBlock
 ; SI-NEXT:    s_endpgm
 ; IR-LABEL: @infinite_loop_nest_ret(
 ; IR-NEXT:  entry:
@@ -212,4 +349,82 @@ return:
   ret void
 }
 
+define amdgpu_kernel void @infinite_loop_nest_ret_callbr(ptr addrspace(1) %out) {
+; SI-LABEL: infinite_loop_nest_ret_callbr:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 1, v0
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; SI-NEXT:    ;;#ASMSTART
+; SI-NEXT:    ;;#ASMEND
+; SI-NEXT:  ; %bb.1: ; %outer_loop.preheader
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x9
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, 0x3e7
+; SI-NEXT:    s_and_b64 s[0:1], exec, 0
+; SI-NEXT:    s_branch .LBB7_3
+; SI-NEXT:  .LBB7_2: ; %loop.exit.guard
+; SI-NEXT:    ; in Loop: Header=BB7_3 Depth=1
+; SI-NEXT:    s_and_b64 vcc, exec, s[2:3]
+; SI-NEXT:    s_cbranch_vccnz .LBB7_5
+; SI-NEXT:  .LBB7_3: ; %outer_loop
+; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:    ;;#ASMSTART
+; SI-NEXT:    ;;#ASMEND
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_mov_b64 s[2:3], -1
+; SI-NEXT:    s_mov_b64 vcc, s[0:1]
+; SI-NEXT:    s_cbranch_vccz .LBB7_2
+; SI-NEXT:  ; %bb.4: ; %TransitionBlock.target.outer_loop
+; SI-NEXT:    ; in Loop: Header=BB7_3 Depth=1
+; SI-NEXT:    s_mov_b64 s[2:3], 0
+; SI-NEXT:    s_branch .LBB7_2
+; SI-NEXT:  .LBB7_5: ; Inline asm indirect target
+; SI-NEXT:    ; %UnifiedReturnBlock
+; SI-NEXT:    ; Label of block must be emitted
+; SI-NEXT:    s_endpgm
+; IR-LABEL: @infinite_loop_nest_ret_callbr(
+; IR-NEXT:  entry:
+; IR-NEXT:    [[TMP:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; IR-NEXT:    [[COND1:%.*]] = icmp ne i32 [[TMP]], 1
+; IR-NEXT:    [[COND1_32:%.*]] = zext i1 [[COND1]] to i32
+; IR-NEXT:    callbr void asm "", "r,!i"(i32 [[COND1_32]])
+; IR-NEXT:            to label [[OUTER_LOOP:%.*]] [label %UnifiedReturnBlock]
+; IR:       outer_loop:
+; IR-NEXT:    callbr void asm "", ""()
+; IR-NEXT:            to label [[INNER_LOOP:%.*]] []
+; IR:       inner_loop:
+; IR-NEXT:    store volatile i32 999, ptr addrspace(1) [[OUT:%.*]], align 4
+; IR-NEXT:    [[COND3:%.*]] = icmp eq i32 [[TMP]], 3
+; IR-NEXT:    [[COND3_32:%.*]] = zext i1 [[COND3]] to i32
+; IR-NEXT:    br i1 true, label [[TRANSITIONBLOCK:%.*]], label [[UNIFIEDRETURNBLOCK:%.*]]
+; IR:       TransitionBlock:
+; IR-NEXT:    callbr void asm "", "r,!i"(i32 [[COND3_32]])
+; IR-NEXT:            to label [[INNER_LOOP]] [label %outer_loop]
+; IR:       UnifiedReturnBlock:
+; IR-NEXT:    ret void
+;
+entry:
+  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %cond1 = icmp ne i32 %tmp, 1  ; avoid following BB optimizing away through the domination
+  %cond1_32 = zext i1 %cond1 to i32
+  callbr void asm "", "r,!i"(i32 %cond1_32) to label %outer_loop [label %return]
+
+outer_loop:
+  ; %cond2 = icmp eq i32 %tmp, 2
+  ; br i1 %cond2, label %outer_loop, label %inner_loop
+  callbr void asm "", ""() to label %inner_loop []
+
+inner_loop:                                     ; preds = %LeafBlock, %LeafBlock1
+  store volatile i32 999, ptr addrspace(1) %out, align 4
+  %cond3 = icmp eq i32 %tmp, 3
+  %cond3_32 = zext i1 %cond3 to i32
+  callbr void asm "", "r,!i"(i32 %cond3_32) to label %inner_loop [label %outer_loop]
+
+return:
+  ret void
+}
+
 declare i32 @llvm.amdgcn.workitem.id.x()
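
For readers skimming the new checks: the IR-prefixed lines above all describe the same rewrite, in which an infinite loop whose back edge is a callbr is split so that the kernel acquires a reachable return. The following standalone sketch restates that before/after shape in plain LLVM IR. It is not part of the test; the function names are invented for illustration, and the exact RUN lines and pass pipeline live in the test file itself and are not shown in this diff.

; Before: a kernel that loops forever through a callbr back edge,
; as in @infinite_loop_callbr above.
define amdgpu_kernel void @callbr_loop_sketch(ptr addrspace(1) %out) {
entry:
  callbr void asm "", ""() to label %loop []

loop:
  store volatile i32 999, ptr addrspace(1) %out, align 4
  callbr void asm "", ""() to label %loop []
}

; After the rewrite described by the IR checks: the back edge is guarded by a
; 'br i1 true' into a TransitionBlock, and a synthetic DummyReturnBlock
; supplies the ret the original function lacked.
define amdgpu_kernel void @callbr_loop_sketch_unified(ptr addrspace(1) %out) {
entry:
  callbr void asm "", ""() to label %loop []

loop:
  store volatile i32 999, ptr addrspace(1) %out, align 4
  br i1 true, label %TransitionBlock, label %DummyReturnBlock

TransitionBlock:
  callbr void asm "", ""() to label %loop []

DummyReturnBlock:
  ret void
}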
