; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn -mcpu=gfx802 -asm-verbose=0 < %s | FileCheck -check-prefixes=GCN,GFX8 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -asm-verbose=0 < %s | FileCheck -check-prefixes=GCN,GFX9 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -mattr=-back-off-barrier -asm-verbose=0 < %s | FileCheck -check-prefix=GCN %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-back-off-barrier -asm-verbose=0 < %s | FileCheck -check-prefix=GCN %s

; Check the placement of s_waitcnt instructions for outstanding vector memory
; operations, in particular that loads and stores are waited on before an
; s_barrier. The gfx10+ RUN lines disable back-off-barrier so that those
; targets must also wait before reaching the barrier.

define amdgpu_kernel void @barrier_vmcnt_global(ptr addrspace(1) %arg) {
; GFX8-LABEL: barrier_vmcnt_global:
; GFX8: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX8-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v3, s1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, s0, v1
; GFX8-NEXT: v_addc_u32_e32 v2, vcc, 0, v3, vcc
; GFX8-NEXT: flat_load_dword v4, v[1:2]
; GFX8-NEXT: v_mov_b32_e32 v1, 0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0
; GFX8-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: s_barrier
; GFX8-NEXT: flat_store_dword v[0:1], v4
; GFX8-NEXT: s_endpgm
;
; GFX9-LABEL: barrier_vmcnt_global:
; GFX9: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_dword v2, v1, s[0:1]
; GFX9-NEXT: v_add_u32_e32 v1, 1, v0
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[0:1]
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v3, v1, vcc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_barrier
; GFX9-NEXT: global_store_dword v[0:1], v2, off
; GFX9-NEXT: s_endpgm
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = zext i32 %tmp to i64
  %tmp2 = shl nuw nsw i64 %tmp1, 32
  %tmp3 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 %tmp1
  %tmp4 = load i32, ptr addrspace(1) %tmp3, align 4
  fence syncscope("singlethread") release
  tail call void @llvm.amdgcn.s.barrier()
  fence syncscope("singlethread") acquire
  %tmp5 = add nuw nsw i64 %tmp2, 4294967296
  %tmp6 = lshr exact i64 %tmp5, 32
  %tmp7 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 %tmp6
  store i32 %tmp4, ptr addrspace(1) %tmp7, align 4
  ret void
}

define amdgpu_kernel void @barrier_vscnt_global(ptr addrspace(1) %arg) {
; GFX8-LABEL: barrier_vscnt_global:
; GFX8: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 2, v0
; GFX8-NEXT: v_mov_b32_e32 v1, 0
; GFX8-NEXT: v_lshrrev_b64 v[2:3], 30, v[1:2]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, s1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, s0, v2
; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v4, v3, vcc
; GFX8-NEXT: flat_store_dword v[2:3], v1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0
; GFX8-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
; GFX8-NEXT: v_mov_b32_e32 v3, 1
; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v4, v1, vcc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: s_barrier
; GFX8-NEXT: flat_store_dword v[0:1], v3
; GFX8-NEXT: s_endpgm
;
; GFX9-LABEL: barrier_vscnt_global:
; GFX9: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_add_u32_e32 v2, 2, v0
; GFX9-NEXT: v_lshrrev_b64 v[2:3], 30, v[1:2]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, s1
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v4, v3, vcc
; GFX9-NEXT: global_store_dword v[2:3], v1, off
; GFX9-NEXT: v_add_u32_e32 v2, 1, v0
; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
; GFX9-NEXT: v_mov_b32_e32 v3, 1
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v4, v1, vcc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_barrier
; GFX9-NEXT: global_store_dword v[0:1], v3, off
; GFX9-NEXT: s_endpgm
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = zext i32 %tmp to i64
  %tmp2 = shl nuw nsw i64 %tmp1, 32
  %tmp3 = add nuw nsw i64 %tmp2, 8589934592
  %tmp4 = lshr exact i64 %tmp3, 32
  %tmp5 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 %tmp4
  store i32 0, ptr addrspace(1) %tmp5, align 4
  fence syncscope("singlethread") release
  tail call void @llvm.amdgcn.s.barrier()
  fence syncscope("singlethread") acquire
  %tmp6 = add nuw nsw i64 %tmp2, 4294967296
  %tmp7 = lshr exact i64 %tmp6, 32
  %tmp8 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 %tmp7
  store i32 1, ptr addrspace(1) %tmp8, align 4
  ret void
}

define amdgpu_kernel void @barrier_vmcnt_vscnt_global(ptr addrspace(1) %arg) {
; GFX8-LABEL: barrier_vmcnt_vscnt_global:
; GFX8: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 2, v0
; GFX8-NEXT: v_mov_b32_e32 v1, 0
; GFX8-NEXT: v_lshrrev_b64 v[2:3], 30, v[1:2]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, s1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, s0, v2
; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v4, v3, vcc
; GFX8-NEXT: flat_store_dword v[2:3], v1
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, s0, v2
; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX8-NEXT: flat_load_dword v3, v[2:3]
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0
; GFX8-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v4, v1, vcc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: s_barrier
; GFX8-NEXT: flat_store_dword v[0:1], v3
; GFX8-NEXT: s_endpgm
;
; GFX9-LABEL: barrier_vmcnt_vscnt_global:
; GFX9: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_add_u32_e32 v2, 2, v0
; GFX9-NEXT: v_lshrrev_b64 v[2:3], 30, v[1:2]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, s1
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v4, v3, vcc
; GFX9-NEXT: global_store_dword v[2:3], v1, off
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; GFX9-NEXT: global_load_dword v3, v2, s[0:1]
; GFX9-NEXT: v_add_u32_e32 v2, 1, v0
; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v4, v1, vcc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_barrier
; GFX9-NEXT: global_store_dword v[0:1], v3, off
; GFX9-NEXT: s_endpgm
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = zext i32 %tmp to i64
  %tmp2 = shl nuw nsw i64 %tmp1, 32
  %tmp3 = add nuw nsw i64 %tmp2, 8589934592
  %tmp4 = lshr exact i64 %tmp3, 32
  %tmp5 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 %tmp4
  store i32 0, ptr addrspace(1) %tmp5, align 4
  %tmp6 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 %tmp1
  %tmp7 = load i32, ptr addrspace(1) %tmp6, align 4
  fence syncscope("singlethread") release
  tail call void @llvm.amdgcn.s.barrier()
  fence syncscope("singlethread") acquire
  %tmp8 = add nuw nsw i64 %tmp2, 4294967296
  %tmp9 = lshr exact i64 %tmp8, 32
  %tmp10 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 %tmp9
  store i32 %tmp7, ptr addrspace(1) %tmp10, align 4
  ret void
}
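; The following tests repeat the barrier patterns above with flat pointers.
; Flat accesses also count against lgkmcnt, so the pre-barrier waits here
; generally become s_waitcnt vmcnt(0) lgkmcnt(0) rather than just vmcnt(0).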
define amdgpu_kernel void @barrier_vmcnt_flat(ptr %arg) {
; GFX8-LABEL: barrier_vmcnt_flat:
; GFX8: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX8-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v3, s1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, s0, v1
; GFX8-NEXT: v_addc_u32_e32 v2, vcc, 0, v3, vcc
; GFX8-NEXT: flat_load_dword v4, v[1:2]
; GFX8-NEXT: v_mov_b32_e32 v1, 0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0
; GFX8-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: s_barrier
; GFX8-NEXT: flat_store_dword v[0:1], v4
; GFX8-NEXT: s_endpgm
;
; GFX9-LABEL: barrier_vmcnt_flat:
; GFX9: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, s0, v1
; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v3, vcc
; GFX9-NEXT: flat_load_dword v4, v[1:2]
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_add_u32_e32 v2, 1, v0
; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v3, v1, vcc
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_barrier
; GFX9-NEXT: flat_store_dword v[0:1], v4
; GFX9-NEXT: s_endpgm
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = zext i32 %tmp to i64
  %tmp2 = shl nuw nsw i64 %tmp1, 32
  %tmp3 = getelementptr inbounds i32, ptr %arg, i64 %tmp1
  %tmp4 = load i32, ptr %tmp3, align 4
  fence syncscope("singlethread") release
  tail call void @llvm.amdgcn.s.barrier()
  fence syncscope("singlethread") acquire
  %tmp5 = add nuw nsw i64 %tmp2, 4294967296
  %tmp6 = lshr exact i64 %tmp5, 32
  %tmp7 = getelementptr inbounds i32, ptr %arg, i64 %tmp6
  store i32 %tmp4, ptr %tmp7, align 4
  ret void
}

define amdgpu_kernel void @barrier_vscnt_flat(ptr %arg) {
; GFX8-LABEL: barrier_vscnt_flat:
; GFX8: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 2, v0
; GFX8-NEXT: v_mov_b32_e32 v1, 0
; GFX8-NEXT: v_lshrrev_b64 v[2:3], 30, v[1:2]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, s1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, s0, v2
; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v4, v3, vcc
; GFX8-NEXT: flat_store_dword v[2:3], v1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0
; GFX8-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
; GFX8-NEXT: v_mov_b32_e32 v3, 1
; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v4, v1, vcc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: s_barrier
; GFX8-NEXT: flat_store_dword v[0:1], v3
; GFX8-NEXT: s_endpgm
;
; GFX9-LABEL: barrier_vscnt_flat:
; GFX9: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_add_u32_e32 v2, 2, v0
; GFX9-NEXT: v_lshrrev_b64 v[2:3], 30, v[1:2]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, s1
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v4, v3, vcc
; GFX9-NEXT: flat_store_dword v[2:3], v1
; GFX9-NEXT: v_add_u32_e32 v2, 1, v0
; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
; GFX9-NEXT: v_mov_b32_e32 v3, 1
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v4, v1, vcc
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_barrier
; GFX9-NEXT: flat_store_dword v[0:1], v3
; GFX9-NEXT: s_endpgm
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = zext i32 %tmp to i64
  %tmp2 = shl nuw nsw i64 %tmp1, 32
  %tmp3 = add nuw nsw i64 %tmp2, 8589934592
  %tmp4 = lshr exact i64 %tmp3, 32
  %tmp5 = getelementptr inbounds i32, ptr %arg, i64 %tmp4
  store i32 0, ptr %tmp5, align 4
  fence syncscope("singlethread") release
  tail call void @llvm.amdgcn.s.barrier()
  fence syncscope("singlethread") acquire
  %tmp6 = add nuw nsw i64 %tmp2, 4294967296
  %tmp7 = lshr exact i64 %tmp6, 32
  %tmp8 = getelementptr inbounds i32, ptr %arg, i64 %tmp7
  store i32 1, ptr %tmp8, align 4
  ret void
}

define amdgpu_kernel void @barrier_vmcnt_vscnt_flat(ptr %arg) {
; GFX8-LABEL: barrier_vmcnt_vscnt_flat:
; GFX8: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 2, v0
; GFX8-NEXT: v_mov_b32_e32 v1, 0
; GFX8-NEXT: v_lshrrev_b64 v[2:3], 30, v[1:2]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, s1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, s0, v2
; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v4, v3, vcc
; GFX8-NEXT: flat_store_dword v[2:3], v1
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, s0, v2
; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX8-NEXT: flat_load_dword v3, v[2:3]
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0
; GFX8-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v4, v1, vcc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: s_barrier
; GFX8-NEXT: flat_store_dword v[0:1], v3
; GFX8-NEXT: s_endpgm
;
; GFX9-LABEL: barrier_vmcnt_vscnt_flat:
; GFX9: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_add_u32_e32 v2, 2, v0
; GFX9-NEXT: v_lshrrev_b64 v[2:3], 30, v[1:2]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, s1
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v4, v3, vcc
; GFX9-NEXT: flat_store_dword v[2:3], v1
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v4, vcc
; GFX9-NEXT: flat_load_dword v3, v[2:3]
; GFX9-NEXT: v_add_u32_e32 v2, 1, v0
; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v4, v1, vcc
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_barrier
; GFX9-NEXT: flat_store_dword v[0:1], v3
; GFX9-NEXT: s_endpgm
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = zext i32 %tmp to i64
  %tmp2 = shl nuw nsw i64 %tmp1, 32
  %tmp3 = add nuw nsw i64 %tmp2, 8589934592
  %tmp4 = lshr exact i64 %tmp3, 32
  %tmp5 = getelementptr inbounds i32, ptr %arg, i64 %tmp4
  store i32 0, ptr %tmp5, align 4
  %tmp6 = getelementptr inbounds i32, ptr %arg, i64 %tmp1
  %tmp7 = load i32, ptr %tmp6, align 4
  fence syncscope("singlethread") release
  tail call void @llvm.amdgcn.s.barrier()
  fence syncscope("singlethread") acquire
  %tmp8 = add nuw nsw i64 %tmp2, 4294967296
  %tmp9 = lshr exact i64 %tmp8, 32
  %tmp10 = getelementptr inbounds i32, ptr %arg, i64 %tmp9
  store i32 %tmp7, ptr %tmp10, align 4
  ret void
}

define amdgpu_kernel void @barrier_vmcnt_vscnt_flat_workgroup(ptr %arg) {
; GFX8-LABEL: barrier_vmcnt_vscnt_flat_workgroup:
; GFX8: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 2, v0
; GFX8-NEXT: v_mov_b32_e32 v1, 0
; GFX8-NEXT: v_lshrrev_b64 v[2:3], 30, v[1:2]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, s1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, s0, v2
; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v4, v3, vcc
; GFX8-NEXT: flat_store_dword v[2:3], v1
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, s0, v2
; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v4, vcc
; GFX8-NEXT: flat_load_dword v3, v[2:3]
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0
; GFX8-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v4, v1, vcc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: s_barrier
; GFX8-NEXT: flat_store_dword v[0:1], v3
; GFX8-NEXT: s_endpgm
;
; GFX9-LABEL: barrier_vmcnt_vscnt_flat_workgroup:
; GFX9: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_add_u32_e32 v2, 2, v0
; GFX9-NEXT: v_lshrrev_b64 v[2:3], 30, v[1:2]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, s1
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v4, v3, vcc
; GFX9-NEXT: flat_store_dword v[2:3], v1
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v2
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v4, vcc
; GFX9-NEXT: flat_load_dword v3, v[2:3]
; GFX9-NEXT: v_add_u32_e32 v2, 1, v0
; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v4, v1, vcc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_barrier
; GFX9-NEXT: flat_store_dword v[0:1], v3
; GFX9-NEXT: s_endpgm
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = zext i32 %tmp to i64
  %tmp2 = shl nuw nsw i64 %tmp1, 32
  %tmp3 = add nuw nsw i64 %tmp2, 8589934592
  %tmp4 = lshr exact i64 %tmp3, 32
  %tmp5 = getelementptr inbounds i32, ptr %arg, i64 %tmp4
  store i32 0, ptr %tmp5, align 4
  %tmp6 = getelementptr inbounds i32, ptr %arg, i64 %tmp1
  %tmp7 = load i32, ptr %tmp6, align 4
  fence syncscope("workgroup") release
  tail call void @llvm.amdgcn.s.barrier()
  fence syncscope("workgroup") acquire
  %tmp8 = add nuw nsw i64 %tmp2, 4294967296
  %tmp9 = lshr exact i64 %tmp8, 32
  %tmp10 = getelementptr inbounds i32, ptr %arg, i64 %tmp9
  store i32 %tmp7, ptr %tmp10, align 4
  ret void
}

define amdgpu_kernel void @load_vmcnt_global(ptr addrspace(1) %arg) {
; GFX8-LABEL: load_vmcnt_global:
; GFX8: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX8-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v3, s1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, s0, v1
; GFX8-NEXT: v_addc_u32_e32 v2, vcc, 0, v3, vcc
; GFX8-NEXT: flat_load_dword v4, v[1:2]
; GFX8-NEXT: v_mov_b32_e32 v1, 0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0
; GFX8-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: flat_store_dword v[0:1], v4
; GFX8-NEXT: s_endpgm
;
; GFX9-LABEL: load_vmcnt_global:
; GFX9: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_dword v2, v1, s[0:1]
; GFX9-NEXT: v_add_u32_e32 v1, 1, v0
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[0:1]
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v3, v1, vcc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: global_store_dword v[0:1], v2, off
; GFX9-NEXT: s_endpgm
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = zext i32 %tmp to i64
  %tmp2 = shl nuw nsw i64 %tmp1, 32
  %tmp3 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 %tmp1
  %tmp4 = load i32, ptr addrspace(1) %tmp3, align 4
  %tmp5 = add nuw nsw i64 %tmp2, 4294967296
  %tmp6 = lshr exact i64 %tmp5, 32
  %tmp7 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 %tmp6
  store i32 %tmp4, ptr addrspace(1) %tmp7, align 4
  ret void
}

define amdgpu_kernel void @load_vmcnt_flat(ptr %arg) {
; GFX8-LABEL: load_vmcnt_flat:
; GFX8: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX8-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v3, s1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, s0, v1
; GFX8-NEXT: v_addc_u32_e32 v2, vcc, 0, v3, vcc
; GFX8-NEXT: flat_load_dword v4, v[1:2]
; GFX8-NEXT: v_mov_b32_e32 v1, 0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0
; GFX8-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: flat_store_dword v[0:1], v4
; GFX8-NEXT: s_endpgm
;
; GFX9-LABEL: load_vmcnt_flat:
; GFX9: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_add_co_u32_e32 v1, vcc, s0, v1
; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v3, vcc
; GFX9-NEXT: flat_load_dword v4, v[1:2]
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: v_add_u32_e32 v2, 1, v0
; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v3, v1, vcc
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: flat_store_dword v[0:1], v4
; GFX9-NEXT: s_endpgm
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = zext i32 %tmp to i64
  %tmp2 = shl nuw nsw i64 %tmp1, 32
  %tmp3 = getelementptr inbounds i32, ptr %arg, i64 %tmp1
  %tmp4 = load i32, ptr %tmp3, align 4
  %tmp5 = add nuw nsw i64 %tmp2, 4294967296
  %tmp6 = lshr exact i64 %tmp5, 32
  %tmp7 = getelementptr inbounds i32, ptr %arg, i64 %tmp6
  store i32 %tmp4, ptr %tmp7, align 4
  ret void
}

define void @store_vscnt_private(ptr addrspace(5) %p) {
; GFX8-LABEL: store_vscnt_private:
; GFX8: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, 0
; GFX8-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: store_vscnt_private:
; GFX9: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0
; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
  store i32 0, ptr addrspace(5) %p
  ret void
}

define void @store_vscnt_global(ptr addrspace(1) %p) {
; GFX8-LABEL: store_vscnt_global:
; GFX8: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v2, 0
; GFX8-NEXT: flat_store_dword v[0:1], v2
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: store_vscnt_global:
; GFX9: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_store_dword v[0:1], v2, off
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
  store i32 0, ptr addrspace(1) %p
  ret void
}

define void @store_vscnt_flat(ptr %p) {
; GFX8-LABEL: store_vscnt_flat:
; GFX8: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v2, 0
; GFX8-NEXT: flat_store_dword v[0:1], v2
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: store_vscnt_flat:
; GFX9: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: flat_store_dword v[0:1], v2
; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
  store i32 0, ptr %p
  ret void
}
define void @function_prologue() {
; GCN-LABEL: function_prologue:
; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
  ret void
}

declare void @llvm.amdgcn.s.barrier()
declare i32 @llvm.amdgcn.workitem.id.x()