; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 < %s | FileCheck -check-prefixes=GCN %s

; Kernel under test: a scalar udiv of the kernel argument feeds a loop phi
; (%iv); inside the loop, two lanes of a <4 x i1> mask (built from the loop
; condition and the workitem id) gate predicated i8 stores to a null
; addrspace(1) pointer. The GCN checks below were generated by
; update_llc_test_checks.py and must match llc output exactly; regenerate
; rather than hand-edit if the IR changes.
define amdgpu_kernel void @copy_to_vreg_1(i32 %0) {
; GCN-LABEL: copy_to_vreg_1:
; GCN:       ; %bb.0: ; %._crit_edge
; GCN-NEXT:    s_load_dword s4, s[4:5], 0x0
; GCN-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
; GCN-NEXT:    v_mov_b64_e32 v[2:3], 0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_sub_i32 s5, 1, s4
; GCN-NEXT:    s_cmp_lt_u32 s4, 2
; GCN-NEXT:    s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT:    s_and_b64 s[2:3], s[0:1], exec
; GCN-NEXT:    s_cselect_b32 s3, s5, 1
; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
; GCN-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[0:1]
; GCN-NEXT:    s_addc_u32 s0, 1, 0
; GCN-NEXT:    v_readfirstlane_b32 s2, v1
; GCN-NEXT:    s_cmp_ge_u32 s3, s4
; GCN-NEXT:    s_cselect_b32 s4, s0, s2
; GCN-NEXT:    v_mov_b32_e32 v1, 0
; GCN-NEXT:    s_cmp_lg_u64 0, 0
; GCN-NEXT:    s_mov_b64 s[0:1], 0
; GCN-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GCN-NEXT:    s_cselect_b64 s[2:3], -1, 0
; GCN-NEXT:    s_branch .LBB0_3
; GCN-NEXT:  .LBB0_1: ; %Flow
; GCN-NEXT:    ; in Loop: Header=BB0_3 Depth=1
; GCN-NEXT:    s_or_b64 exec, exec, s[6:7]
; GCN-NEXT:    s_xor_b64 s[8:9], exec, -1
; GCN-NEXT:  .LBB0_2: ; %Flow3
; GCN-NEXT:    ; in Loop: Header=BB0_3 Depth=1
; GCN-NEXT:    s_or_b64 exec, exec, s[4:5]
; GCN-NEXT:    s_and_b64 s[4:5], exec, s[8:9]
; GCN-NEXT:    s_or_b64 s[0:1], s[4:5], s[0:1]
; GCN-NEXT:    s_mov_b32 s4, 0
; GCN-NEXT:    s_andn2_b64 exec, exec, s[0:1]
; GCN-NEXT:    s_cbranch_execz .LBB0_8
; GCN-NEXT:  .LBB0_3: ; %.lr.ph27
; GCN-NEXT:    ; =>This Inner Loop Header: Depth=1
; GCN-NEXT:    s_cmp_lg_u32 s4, 0
; GCN-NEXT:    s_cselect_b64 s[4:5], -1, 0
; GCN-NEXT:    s_or_b64 s[8:9], vcc, s[4:5]
; GCN-NEXT:    s_xor_b64 s[6:7], s[8:9], -1
; GCN-NEXT:    s_and_saveexec_b64 s[4:5], s[8:9]
; GCN-NEXT:    s_cbranch_execz .LBB0_5
; GCN-NEXT:  ; %bb.4: ; %pred.store.if
; GCN-NEXT:    ; in Loop: Header=BB0_3 Depth=1
; GCN-NEXT:    s_or_b64 s[6:7], s[6:7], exec
; GCN-NEXT:    global_store_byte v[2:3], v1, off
; GCN-NEXT:  .LBB0_5: ; %Flow2
; GCN-NEXT:    ; in Loop: Header=BB0_3 Depth=1
; GCN-NEXT:    s_or_b64 exec, exec, s[4:5]
; GCN-NEXT:    s_mov_b64 s[8:9], -1
; GCN-NEXT:    s_and_saveexec_b64 s[4:5], s[6:7]
; GCN-NEXT:    s_cbranch_execz .LBB0_2
; GCN-NEXT:  ; %bb.6: ; %pred.store.continue
; GCN-NEXT:    ; in Loop: Header=BB0_3 Depth=1
; GCN-NEXT:    s_and_saveexec_b64 s[6:7], s[2:3]
; GCN-NEXT:    s_xor_b64 s[6:7], exec, s[6:7]
; GCN-NEXT:    s_cbranch_execz .LBB0_1
; GCN-NEXT:  ; %bb.7: ; %pred.store.if41
; GCN-NEXT:    ; in Loop: Header=BB0_3 Depth=1
; GCN-NEXT:    global_store_byte v[2:3], v1, off
; GCN-NEXT:    s_branch .LBB0_1
; GCN-NEXT:  .LBB0_8: ; %DummyReturnBlock
; GCN-NEXT:    s_endpgm
._crit_edge:
  %id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
  ; udiv by the (possibly zero) kernel argument; result only feeds the
  ; first-iteration value of the loop phi below.
  %div = udiv i32 1, %0
  br label %.lr.ph27

.lr.ph27:                                         ; preds = %pred.store.if41, %pred.store.continue, %._crit_edge
  ; %iv is %div on entry and 0 on every back edge, so %cmp (%iv > 0) can
  ; only be true on the first iteration.
  %iv = phi i32 [ %div, %._crit_edge ], [ 0, %pred.store.if41 ], [ 0, %pred.store.continue ]
  %cmp = icmp ugt i32 %iv, 0
  %broadcast.splatinsert37 = insertelement <4 x i1> zeroinitializer, i1 %cmp, i64 0
  %.zext = zext i32 %id.x to i64
  %broadcast.splatinsert39 = insertelement <4 x i64> zeroinitializer, i64 %.zext, i64 0
  ; Lane 0 compares the workitem id against 1; lanes 1-3 compare 0 uge 1
  ; (false), so only lanes 0 and 1 of %or are ever extracted below.
  %cmp.1 = icmp uge <4 x i64> %broadcast.splatinsert39, splat (i64 1)
  %or = or <4 x i1> %cmp.1, %broadcast.splatinsert37
  %extract = extractelement <4 x i1> %or, i64 0
  br i1 %extract, label %pred.store.if, label %pred.store.continue

pred.store.if:                                    ; preds = %.lr.ph27
  store i8 0, ptr addrspace(1) null, align 64
  br label %pred.store.continue

pred.store.continue:                              ; preds = %pred.store.if, %.lr.ph27
  %extract.1 = extractelement <4 x i1> %or, i64 1
  br i1 %extract.1, label %pred.store.if41, label %.lr.ph27

pred.store.if41:                                  ; preds = %pred.store.continue
  store i8 0, ptr addrspace(1) null, align 64
  br label %.lr.ph27
}

; Workitem X-id intrinsic; the range attribute bounds the result to [0, 1024).
declare noundef range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.x() #0

attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }