; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s

; Test that doing a shift of a pointer with a constant add will be
; folded into the constant offset addressing mode even if the add has
; multiple uses. This is relevant to accessing 2 separate, adjacent
; LDS globals.

declare i32 @llvm.amdgcn.workitem.id.x() #1

@lds0 = addrspace(3) global [512 x float] poison, align 4
@lds1 = addrspace(3) global [512 x float] poison, align 4

; Make sure the (add tid, 2) << 2 gets folded into the ds's offset as (tid << 2) + 8

define amdgpu_kernel void @load_shl_base_lds_0(ptr addrspace(1) %out, ptr addrspace(1) %add_use) #0 {
; CI-LABEL: load_shl_base_lds_0:
; CI:       ; %bb.0:
; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; CI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    ds_read_b32 v1, v1 offset:8
; CI-NEXT:    s_mov_b32 s7, 0xf000
; CI-NEXT:    s_mov_b32 s6, -1
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    s_mov_b32 s4, s0
; CI-NEXT:    s_mov_b32 s5, s1
; CI-NEXT:    s_mov_b32 s0, s2
; CI-NEXT:    s_mov_b32 s1, s3
; CI-NEXT:    s_mov_b32 s2, s6
; CI-NEXT:    s_mov_b32 s3, s7
; CI-NEXT:    v_add_i32_e32 v0, vcc, 2, v0
; CI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; CI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; CI-NEXT:    s_endpgm
;
; VI-LABEL: load_shl_base_lds_0:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; VI-NEXT:    s_mov_b32 m0, -1
; VI-NEXT:    ds_read_b32 v1, v1 offset:8
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_mov_b32 s0, s2
; VI-NEXT:    s_mov_b32 s1, s3
; VI-NEXT:    s_mov_b32 s2, s6
; VI-NEXT:    s_mov_b32 s3, s7
; VI-NEXT:    v_add_u32_e32 v0, vcc, 2, v0
; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; VI-NEXT:    s_endpgm
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds0, i32 0, i32 %idx.0
  %val0 = load float, ptr addrspace(3) %arrayidx0, align 4
  store i32 %idx.0, ptr addrspace(1) %add_use, align 4
  store float %val0, ptr addrspace(1) %out
  ret void
}
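; Illustrative arithmetic for the fold above (comment only, not matched by
; FileCheck): the address is (tid + 2) * 4 = tid * 4 + 8, so codegen keeps a
; single shifted base and folds the constant into the DS immediate:
;   v_lshlrev_b32 v1, 2, v0          ; base = tid << 2
;   ds_read_b32 v1, v1 offset:8      ; the +2 element offset becomes +8 bytes
; while the second use of %idx.0 still materializes tid + 2 with a v_add.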
; Make sure once the first use is folded into the addressing mode, the
; remaining add use goes through the normal shl + add constant fold.
define amdgpu_kernel void @load_shl_base_lds_1(ptr addrspace(1) %out, ptr addrspace(1) %add_use) #0 {
; CI-LABEL: load_shl_base_lds_1:
; CI:       ; %bb.0:
; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    ds_read_b32 v1, v0 offset:8
; CI-NEXT:    s_mov_b32 s7, 0xf000
; CI-NEXT:    s_mov_b32 s6, -1
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    s_mov_b32 s4, s0
; CI-NEXT:    s_mov_b32 s5, s1
; CI-NEXT:    s_mov_b32 s0, s2
; CI-NEXT:    s_mov_b32 s1, s3
; CI-NEXT:    s_mov_b32 s2, s6
; CI-NEXT:    s_mov_b32 s3, s7
; CI-NEXT:    v_add_i32_e32 v0, vcc, 8, v0
; CI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; CI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; CI-NEXT:    s_endpgm
;
; VI-LABEL: load_shl_base_lds_1:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT:    s_mov_b32 m0, -1
; VI-NEXT:    ds_read_b32 v1, v0 offset:8
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_mov_b32 s0, s2
; VI-NEXT:    s_mov_b32 s1, s3
; VI-NEXT:    s_mov_b32 s2, s6
; VI-NEXT:    s_mov_b32 s3, s7
; VI-NEXT:    v_add_u32_e32 v0, vcc, 8, v0
; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; VI-NEXT:    s_endpgm
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds0, i32 0, i32 %idx.0
  %val0 = load float, ptr addrspace(3) %arrayidx0, align 4
  %shl_add_use = shl i32 %idx.0, 2
  store i32 %shl_add_use, ptr addrspace(1) %add_use, align 4
  store float %val0, ptr addrspace(1) %out
  ret void
}

@maxlds = addrspace(3) global [65536 x i8] poison, align 4

define amdgpu_kernel void @load_shl_base_lds_max_offset(ptr addrspace(1) %out, ptr addrspace(3) %lds, ptr addrspace(1) %add_use) #0 {
; CI-LABEL: load_shl_base_lds_max_offset:
; CI:       ; %bb.0:
; CI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
; CI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    ds_read_u8 v1, v0 offset:65535
; CI-NEXT:    s_mov_b32 s3, 0xf000
; CI-NEXT:    s_mov_b32 s2, -1
; CI-NEXT:    s_mov_b32 s6, s2
; CI-NEXT:    s_mov_b32 s7, s3
; CI-NEXT:    v_add_i32_e32 v0, vcc, 0xffff, v0
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
; CI-NEXT:    buffer_store_byte v1, off, s[0:3], 0
; CI-NEXT:    s_endpgm
;
; VI-LABEL: load_shl_base_lds_max_offset:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI-NEXT:    s_mov_b32 m0, -1
; VI-NEXT:    ds_read_u8 v1, v0 offset:65535
; VI-NEXT:    s_mov_b32 s3, 0xf000
; VI-NEXT:    s_mov_b32 s2, -1
; VI-NEXT:    s_mov_b32 s6, s2
; VI-NEXT:    s_mov_b32 s7, s3
; VI-NEXT:    v_add_u32_e32 v0, vcc, 0xffff, v0
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
; VI-NEXT:    buffer_store_byte v1, off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 65535
  %arrayidx0 = getelementptr inbounds [65536 x i8], ptr addrspace(3) @maxlds, i32 0, i32 %idx.0
  %val0 = load i8, ptr addrspace(3) %arrayidx0
  store i32 %idx.0, ptr addrspace(1) %add_use
  store i8 %val0, ptr addrspace(1) %out
  ret void
}
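; Illustrative note: the DS instruction offset field is 16 bits, so 65535 is
; the largest immediate that can be folded, as checked above. A hypothetical
; variant using "%idx.0 = add nsw i32 %tid.x, 65536" would be expected to
; keep the add in the address register instead of producing an offset.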
; The two globals are placed adjacent in memory, so the same base
; pointer can be used with an offset into the second one.
define amdgpu_kernel void @load_shl_base_lds_2(ptr addrspace(1) %out) #0 {
; CI-LABEL: load_shl_base_lds_2:
; CI:       ; %bb.0:
; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    ds_read2st64_b32 v[0:1], v0 offset0:1 offset1:9
; CI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
; CI-NEXT:    s_mov_b32 s3, 0xf000
; CI-NEXT:    s_mov_b32 s2, -1
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    v_add_f32_e32 v0, v0, v1
; CI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; CI-NEXT:    s_endpgm
;
; VI-LABEL: load_shl_base_lds_2:
; VI:       ; %bb.0:
; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT:    s_mov_b32 m0, -1
; VI-NEXT:    ds_read2st64_b32 v[0:1], v0 offset0:1 offset1:9
; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT:    s_mov_b32 s3, 0xf000
; VI-NEXT:    s_mov_b32 s2, -1
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    v_add_f32_e32 v0, v0, v1
; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 64
  %arrayidx0 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds0, i32 0, i32 %idx.0
  %val0 = load float, ptr addrspace(3) %arrayidx0, align 4
  %arrayidx1 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds1, i32 0, i32 %idx.0
  %val1 = load float, ptr addrspace(3) %arrayidx1, align 4
  %sum = fadd float %val0, %val1
  store float %sum, ptr addrspace(1) %out, align 4
  ret void
}
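; Illustrative note (assuming @lds0 is allocated at LDS address 0 with @lds1
; immediately after its 2048 bytes): both loads share the base tid << 2, at
; +256 bytes into @lds0 and +2048+256 = +2304 bytes into @lds1.
; ds_read2st64_b32 encodes its two offsets in 64-dword (256-byte) units,
; hence offset0:1 (1 * 256) and offset1:9 (9 * 256).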
define amdgpu_kernel void @store_shl_base_lds_0(ptr addrspace(1) %out, ptr addrspace(1) %add_use) #0 {
; CI-LABEL: store_shl_base_lds_0:
; CI:       ; %bb.0:
; CI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0xb
; CI-NEXT:    s_mov_b32 s3, 0xf000
; CI-NEXT:    s_mov_b32 s2, -1
; CI-NEXT:    v_add_i32_e32 v1, vcc, 2, v0
; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; CI-NEXT:    v_mov_b32_e32 v2, 1.0
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    ds_write_b32 v0, v2 offset:8
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    buffer_store_dword v1, off, s[0:3], 0
; CI-NEXT:    s_endpgm
;
; VI-LABEL: store_shl_base_lds_0:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x2c
; VI-NEXT:    s_mov_b32 s3, 0xf000
; VI-NEXT:    s_mov_b32 s2, -1
; VI-NEXT:    v_add_u32_e32 v1, vcc, 2, v0
; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT:    v_mov_b32_e32 v2, 1.0
; VI-NEXT:    s_mov_b32 m0, -1
; VI-NEXT:    ds_write_b32 v0, v2 offset:8
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    buffer_store_dword v1, off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds0, i32 0, i32 %idx.0
  store float 1.0, ptr addrspace(3) %arrayidx0, align 4
  store i32 %idx.0, ptr addrspace(1) %add_use, align 4
  ret void
}

; --------------------------------------------------------------------------------
; Atomics.

@lds2 = addrspace(3) global [512 x i32] poison, align 4

; define amdgpu_kernel void @atomic_load_shl_base_lds_0(ptr addrspace(1) %out, ptr addrspace(1) %add_use) #0 {
;   %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
;   %idx.0 = add nsw i32 %tid.x, 2
;   %arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(3) @lds2, i32 0, i32 %idx.0
;   %val = load atomic i32, ptr addrspace(3) %arrayidx0 seq_cst, align 4
;   store i32 %val, ptr addrspace(1) %out, align 4
;   store i32 %idx.0, ptr addrspace(1) %add_use, align 4
;   ret void
; }

define amdgpu_kernel void @atomic_cmpxchg_shl_base_lds_0(ptr addrspace(1) %out, ptr addrspace(1) %add_use, i32 %swap) #0 {
; CI-LABEL: atomic_cmpxchg_shl_base_lds_0:
; CI:       ; %bb.0:
; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; CI-NEXT:    s_load_dword s8, s[4:5], 0xd
; CI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; CI-NEXT:    v_mov_b32_e32 v2, 7
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    s_mov_b32 s7, 0xf000
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    v_mov_b32_e32 v3, s8
; CI-NEXT:    ds_cmpst_rtn_b32 v1, v1, v2, v3 offset:8
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    s_mov_b32 s6, -1
; CI-NEXT:    s_mov_b32 s4, s0
; CI-NEXT:    s_mov_b32 s5, s1
; CI-NEXT:    s_mov_b32 s0, s2
; CI-NEXT:    s_mov_b32 s1, s3
; CI-NEXT:    s_mov_b32 s2, s6
; CI-NEXT:    s_mov_b32 s3, s7
; CI-NEXT:    v_add_i32_e32 v0, vcc, 2, v0
; CI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; CI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; CI-NEXT:    s_endpgm
;
; VI-LABEL: atomic_cmpxchg_shl_base_lds_0:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    s_load_dword s8, s[4:5], 0x34
; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; VI-NEXT:    v_mov_b32_e32 v2, 7
; VI-NEXT:    s_mov_b32 m0, -1
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    v_mov_b32_e32 v3, s8
; VI-NEXT:    ds_cmpst_rtn_b32 v1, v1, v2, v3 offset:8
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_mov_b32 s0, s2
; VI-NEXT:    s_mov_b32 s1, s3
; VI-NEXT:    s_mov_b32 s2, s6
; VI-NEXT:    s_mov_b32 s3, s7
; VI-NEXT:    v_add_u32_e32 v0, vcc, 2, v0
; VI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(3) @lds2, i32 0, i32 %idx.0
  %pair = cmpxchg ptr addrspace(3) %arrayidx0, i32 7, i32 %swap seq_cst monotonic
  %result = extractvalue { i32, i1 } %pair, 0
  store i32 %result, ptr addrspace(1) %out, align 4
  store i32 %idx.0, ptr addrspace(1) %add_use, align 4
  ret void
}
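; Illustrative note: the DS atomic instructions accept the same 16-bit
; immediate offset as the plain LDS loads and stores, so the (tid + 2) << 2
; address folds to offset:8 in the cmpxchg above and in each of the
; atomicrmw tests below, e.g. "ds_cmpst_rtn_b32 v1, v1, v2, v3 offset:8".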
define amdgpu_kernel void @atomic_swap_shl_base_lds_0(ptr addrspace(1) %out, ptr addrspace(1) %add_use) #0 {
; CI-LABEL: atomic_swap_shl_base_lds_0:
; CI:       ; %bb.0:
; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; CI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; CI-NEXT:    v_mov_b32_e32 v2, 3
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    ds_wrxchg_rtn_b32 v1, v1, v2 offset:8
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    s_mov_b32 s7, 0xf000
; CI-NEXT:    s_mov_b32 s6, -1
; CI-NEXT:    s_mov_b32 s4, s0
; CI-NEXT:    s_mov_b32 s5, s1
; CI-NEXT:    s_mov_b32 s0, s2
; CI-NEXT:    s_mov_b32 s1, s3
; CI-NEXT:    s_mov_b32 s2, s6
; CI-NEXT:    s_mov_b32 s3, s7
; CI-NEXT:    v_add_i32_e32 v0, vcc, 2, v0
; CI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; CI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; CI-NEXT:    s_endpgm
;
; VI-LABEL: atomic_swap_shl_base_lds_0:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; VI-NEXT:    v_mov_b32_e32 v2, 3
; VI-NEXT:    s_mov_b32 m0, -1
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    ds_wrxchg_rtn_b32 v1, v1, v2 offset:8
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_mov_b32 s0, s2
; VI-NEXT:    s_mov_b32 s1, s3
; VI-NEXT:    s_mov_b32 s2, s6
; VI-NEXT:    s_mov_b32 s3, s7
; VI-NEXT:    v_add_u32_e32 v0, vcc, 2, v0
; VI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(3) @lds2, i32 0, i32 %idx.0
  %val = atomicrmw xchg ptr addrspace(3) %arrayidx0, i32 3 seq_cst
  store i32 %val, ptr addrspace(1) %out, align 4
  store i32 %idx.0, ptr addrspace(1) %add_use, align 4
  ret void
}

define amdgpu_kernel void @atomic_add_shl_base_lds_0(ptr addrspace(1) %out, ptr addrspace(1) %add_use) #0 {
; CI-LABEL: atomic_add_shl_base_lds_0:
; CI:       ; %bb.0:
; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; CI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; CI-NEXT:    v_mov_b32_e32 v2, 3
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    ds_add_rtn_u32 v1, v1, v2 offset:8
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    s_mov_b32 s7, 0xf000
; CI-NEXT:    s_mov_b32 s6, -1
; CI-NEXT:    s_mov_b32 s4, s0
; CI-NEXT:    s_mov_b32 s5, s1
; CI-NEXT:    s_mov_b32 s0, s2
; CI-NEXT:    s_mov_b32 s1, s3
; CI-NEXT:    s_mov_b32 s2, s6
; CI-NEXT:    s_mov_b32 s3, s7
; CI-NEXT:    v_add_i32_e32 v0, vcc, 2, v0
; CI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; CI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; CI-NEXT:    s_endpgm
;
; VI-LABEL: atomic_add_shl_base_lds_0:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; VI-NEXT:    v_mov_b32_e32 v2, 3
; VI-NEXT:    s_mov_b32 m0, -1
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    ds_add_rtn_u32 v1, v1, v2 offset:8
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_mov_b32 s0, s2
; VI-NEXT:    s_mov_b32 s1, s3
; VI-NEXT:    s_mov_b32 s2, s6
; VI-NEXT:    s_mov_b32 s3, s7
; VI-NEXT:    v_add_u32_e32 v0, vcc, 2, v0
; VI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(3) @lds2, i32 0, i32 %idx.0
  %val = atomicrmw add ptr addrspace(3) %arrayidx0, i32 3 seq_cst
  store i32 %val, ptr addrspace(1) %out, align 4
  store i32 %idx.0, ptr addrspace(1) %add_use, align 4
  ret void
}
define amdgpu_kernel void @atomic_sub_shl_base_lds_0(ptr addrspace(1) %out, ptr addrspace(1) %add_use) #0 {
; CI-LABEL: atomic_sub_shl_base_lds_0:
; CI:       ; %bb.0:
; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; CI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; CI-NEXT:    v_mov_b32_e32 v2, 3
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    ds_sub_rtn_u32 v1, v1, v2 offset:8
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    s_mov_b32 s7, 0xf000
; CI-NEXT:    s_mov_b32 s6, -1
; CI-NEXT:    s_mov_b32 s4, s0
; CI-NEXT:    s_mov_b32 s5, s1
; CI-NEXT:    s_mov_b32 s0, s2
; CI-NEXT:    s_mov_b32 s1, s3
; CI-NEXT:    s_mov_b32 s2, s6
; CI-NEXT:    s_mov_b32 s3, s7
; CI-NEXT:    v_add_i32_e32 v0, vcc, 2, v0
; CI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; CI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; CI-NEXT:    s_endpgm
;
; VI-LABEL: atomic_sub_shl_base_lds_0:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; VI-NEXT:    v_mov_b32_e32 v2, 3
; VI-NEXT:    s_mov_b32 m0, -1
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    ds_sub_rtn_u32 v1, v1, v2 offset:8
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_mov_b32 s0, s2
; VI-NEXT:    s_mov_b32 s1, s3
; VI-NEXT:    s_mov_b32 s2, s6
; VI-NEXT:    s_mov_b32 s3, s7
; VI-NEXT:    v_add_u32_e32 v0, vcc, 2, v0
; VI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(3) @lds2, i32 0, i32 %idx.0
  %val = atomicrmw sub ptr addrspace(3) %arrayidx0, i32 3 seq_cst
  store i32 %val, ptr addrspace(1) %out, align 4
  store i32 %idx.0, ptr addrspace(1) %add_use, align 4
  ret void
}

define amdgpu_kernel void @atomic_and_shl_base_lds_0(ptr addrspace(1) %out, ptr addrspace(1) %add_use) #0 {
; CI-LABEL: atomic_and_shl_base_lds_0:
; CI:       ; %bb.0:
; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; CI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; CI-NEXT:    v_mov_b32_e32 v2, 3
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    ds_and_rtn_b32 v1, v1, v2 offset:8
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    s_mov_b32 s7, 0xf000
; CI-NEXT:    s_mov_b32 s6, -1
; CI-NEXT:    s_mov_b32 s4, s0
; CI-NEXT:    s_mov_b32 s5, s1
; CI-NEXT:    s_mov_b32 s0, s2
; CI-NEXT:    s_mov_b32 s1, s3
; CI-NEXT:    s_mov_b32 s2, s6
; CI-NEXT:    s_mov_b32 s3, s7
; CI-NEXT:    v_add_i32_e32 v0, vcc, 2, v0
; CI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; CI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; CI-NEXT:    s_endpgm
;
; VI-LABEL: atomic_and_shl_base_lds_0:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; VI-NEXT:    v_mov_b32_e32 v2, 3
; VI-NEXT:    s_mov_b32 m0, -1
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    ds_and_rtn_b32 v1, v1, v2 offset:8
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_mov_b32 s0, s2
; VI-NEXT:    s_mov_b32 s1, s3
; VI-NEXT:    s_mov_b32 s2, s6
; VI-NEXT:    s_mov_b32 s3, s7
; VI-NEXT:    v_add_u32_e32 v0, vcc, 2, v0
; VI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(3) @lds2, i32 0, i32 %idx.0
  %val = atomicrmw and ptr addrspace(3) %arrayidx0, i32 3 seq_cst
  store i32 %val, ptr addrspace(1) %out, align 4
  store i32 %idx.0, ptr addrspace(1) %add_use, align 4
  ret void
}
define amdgpu_kernel void @atomic_or_shl_base_lds_0(ptr addrspace(1) %out, ptr addrspace(1) %add_use) #0 {
; CI-LABEL: atomic_or_shl_base_lds_0:
; CI:       ; %bb.0:
; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; CI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; CI-NEXT:    v_mov_b32_e32 v2, 3
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    ds_or_rtn_b32 v1, v1, v2 offset:8
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    s_mov_b32 s7, 0xf000
; CI-NEXT:    s_mov_b32 s6, -1
; CI-NEXT:    s_mov_b32 s4, s0
; CI-NEXT:    s_mov_b32 s5, s1
; CI-NEXT:    s_mov_b32 s0, s2
; CI-NEXT:    s_mov_b32 s1, s3
; CI-NEXT:    s_mov_b32 s2, s6
; CI-NEXT:    s_mov_b32 s3, s7
; CI-NEXT:    v_add_i32_e32 v0, vcc, 2, v0
; CI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; CI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; CI-NEXT:    s_endpgm
;
; VI-LABEL: atomic_or_shl_base_lds_0:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; VI-NEXT:    v_mov_b32_e32 v2, 3
; VI-NEXT:    s_mov_b32 m0, -1
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    ds_or_rtn_b32 v1, v1, v2 offset:8
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_mov_b32 s0, s2
; VI-NEXT:    s_mov_b32 s1, s3
; VI-NEXT:    s_mov_b32 s2, s6
; VI-NEXT:    s_mov_b32 s3, s7
; VI-NEXT:    v_add_u32_e32 v0, vcc, 2, v0
; VI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(3) @lds2, i32 0, i32 %idx.0
  %val = atomicrmw or ptr addrspace(3) %arrayidx0, i32 3 seq_cst
  store i32 %val, ptr addrspace(1) %out, align 4
  store i32 %idx.0, ptr addrspace(1) %add_use, align 4
  ret void
}

define amdgpu_kernel void @atomic_xor_shl_base_lds_0(ptr addrspace(1) %out, ptr addrspace(1) %add_use) #0 {
; CI-LABEL: atomic_xor_shl_base_lds_0:
; CI:       ; %bb.0:
; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; CI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; CI-NEXT:    v_mov_b32_e32 v2, 3
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    ds_xor_rtn_b32 v1, v1, v2 offset:8
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    s_mov_b32 s7, 0xf000
; CI-NEXT:    s_mov_b32 s6, -1
; CI-NEXT:    s_mov_b32 s4, s0
; CI-NEXT:    s_mov_b32 s5, s1
; CI-NEXT:    s_mov_b32 s0, s2
; CI-NEXT:    s_mov_b32 s1, s3
; CI-NEXT:    s_mov_b32 s2, s6
; CI-NEXT:    s_mov_b32 s3, s7
; CI-NEXT:    v_add_i32_e32 v0, vcc, 2, v0
; CI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; CI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; CI-NEXT:    s_endpgm
;
; VI-LABEL: atomic_xor_shl_base_lds_0:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; VI-NEXT:    v_mov_b32_e32 v2, 3
; VI-NEXT:    s_mov_b32 m0, -1
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    ds_xor_rtn_b32 v1, v1, v2 offset:8
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_mov_b32 s0, s2
; VI-NEXT:    s_mov_b32 s1, s3
; VI-NEXT:    s_mov_b32 s2, s6
; VI-NEXT:    s_mov_b32 s3, s7
; VI-NEXT:    v_add_u32_e32 v0, vcc, 2, v0
; VI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(3) @lds2, i32 0, i32 %idx.0
  %val = atomicrmw xor ptr addrspace(3) %arrayidx0, i32 3 seq_cst
  store i32 %val, ptr addrspace(1) %out, align 4
  store i32 %idx.0, ptr addrspace(1) %add_use, align 4
  ret void
}

; define amdgpu_kernel void @atomic_nand_shl_base_lds_0(ptr addrspace(1) %out, ptr addrspace(1) %add_use) #0 {
;   %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
;   %idx.0 = add nsw i32 %tid.x, 2
;   %arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(3) @lds2, i32 0, i32 %idx.0
;   %val = atomicrmw nand ptr addrspace(3) %arrayidx0, i32 3 seq_cst
;   store i32 %val, ptr addrspace(1) %out, align 4
;   store i32 %idx.0, ptr addrspace(1) %add_use, align 4
;   ret void
; }
define amdgpu_kernel void @atomic_min_shl_base_lds_0(ptr addrspace(1) %out, ptr addrspace(1) %add_use) #0 {
; CI-LABEL: atomic_min_shl_base_lds_0:
; CI:       ; %bb.0:
; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; CI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; CI-NEXT:    v_mov_b32_e32 v2, 3
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    ds_min_rtn_i32 v1, v1, v2 offset:8
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    s_mov_b32 s7, 0xf000
; CI-NEXT:    s_mov_b32 s6, -1
; CI-NEXT:    s_mov_b32 s4, s0
; CI-NEXT:    s_mov_b32 s5, s1
; CI-NEXT:    s_mov_b32 s0, s2
; CI-NEXT:    s_mov_b32 s1, s3
; CI-NEXT:    s_mov_b32 s2, s6
; CI-NEXT:    s_mov_b32 s3, s7
; CI-NEXT:    v_add_i32_e32 v0, vcc, 2, v0
; CI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; CI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; CI-NEXT:    s_endpgm
;
; VI-LABEL: atomic_min_shl_base_lds_0:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; VI-NEXT:    v_mov_b32_e32 v2, 3
; VI-NEXT:    s_mov_b32 m0, -1
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    ds_min_rtn_i32 v1, v1, v2 offset:8
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_mov_b32 s0, s2
; VI-NEXT:    s_mov_b32 s1, s3
; VI-NEXT:    s_mov_b32 s2, s6
; VI-NEXT:    s_mov_b32 s3, s7
; VI-NEXT:    v_add_u32_e32 v0, vcc, 2, v0
; VI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(3) @lds2, i32 0, i32 %idx.0
  %val = atomicrmw min ptr addrspace(3) %arrayidx0, i32 3 seq_cst
  store i32 %val, ptr addrspace(1) %out, align 4
  store i32 %idx.0, ptr addrspace(1) %add_use, align 4
  ret void
}

define amdgpu_kernel void @atomic_max_shl_base_lds_0(ptr addrspace(1) %out, ptr addrspace(1) %add_use) #0 {
; CI-LABEL: atomic_max_shl_base_lds_0:
; CI:       ; %bb.0:
; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; CI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; CI-NEXT:    v_mov_b32_e32 v2, 3
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    ds_max_rtn_i32 v1, v1, v2 offset:8
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    s_mov_b32 s7, 0xf000
; CI-NEXT:    s_mov_b32 s6, -1
; CI-NEXT:    s_mov_b32 s4, s0
; CI-NEXT:    s_mov_b32 s5, s1
; CI-NEXT:    s_mov_b32 s0, s2
; CI-NEXT:    s_mov_b32 s1, s3
; CI-NEXT:    s_mov_b32 s2, s6
; CI-NEXT:    s_mov_b32 s3, s7
; CI-NEXT:    v_add_i32_e32 v0, vcc, 2, v0
; CI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; CI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; CI-NEXT:    s_endpgm
;
; VI-LABEL: atomic_max_shl_base_lds_0:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; VI-NEXT:    v_mov_b32_e32 v2, 3
; VI-NEXT:    s_mov_b32 m0, -1
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    ds_max_rtn_i32 v1, v1, v2 offset:8
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_mov_b32 s0, s2
; VI-NEXT:    s_mov_b32 s1, s3
; VI-NEXT:    s_mov_b32 s2, s6
; VI-NEXT:    s_mov_b32 s3, s7
; VI-NEXT:    v_add_u32_e32 v0, vcc, 2, v0
; VI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(3) @lds2, i32 0, i32 %idx.0
  %val = atomicrmw max ptr addrspace(3) %arrayidx0, i32 3 seq_cst
  store i32 %val, ptr addrspace(1) %out, align 4
  store i32 %idx.0, ptr addrspace(1) %add_use, align 4
  ret void
}
define amdgpu_kernel void @atomic_umin_shl_base_lds_0(ptr addrspace(1) %out, ptr addrspace(1) %add_use) #0 {
; CI-LABEL: atomic_umin_shl_base_lds_0:
; CI:       ; %bb.0:
; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; CI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; CI-NEXT:    v_mov_b32_e32 v2, 3
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    ds_min_rtn_u32 v1, v1, v2 offset:8
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    s_mov_b32 s7, 0xf000
; CI-NEXT:    s_mov_b32 s6, -1
; CI-NEXT:    s_mov_b32 s4, s0
; CI-NEXT:    s_mov_b32 s5, s1
; CI-NEXT:    s_mov_b32 s0, s2
; CI-NEXT:    s_mov_b32 s1, s3
; CI-NEXT:    s_mov_b32 s2, s6
; CI-NEXT:    s_mov_b32 s3, s7
; CI-NEXT:    v_add_i32_e32 v0, vcc, 2, v0
; CI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; CI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; CI-NEXT:    s_endpgm
;
; VI-LABEL: atomic_umin_shl_base_lds_0:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; VI-NEXT:    v_mov_b32_e32 v2, 3
; VI-NEXT:    s_mov_b32 m0, -1
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    ds_min_rtn_u32 v1, v1, v2 offset:8
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_mov_b32 s0, s2
; VI-NEXT:    s_mov_b32 s1, s3
; VI-NEXT:    s_mov_b32 s2, s6
; VI-NEXT:    s_mov_b32 s3, s7
; VI-NEXT:    v_add_u32_e32 v0, vcc, 2, v0
; VI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(3) @lds2, i32 0, i32 %idx.0
  %val = atomicrmw umin ptr addrspace(3) %arrayidx0, i32 3 seq_cst
  store i32 %val, ptr addrspace(1) %out, align 4
  store i32 %idx.0, ptr addrspace(1) %add_use, align 4
  ret void
}

define amdgpu_kernel void @atomic_umax_shl_base_lds_0(ptr addrspace(1) %out, ptr addrspace(1) %add_use) #0 {
; CI-LABEL: atomic_umax_shl_base_lds_0:
; CI:       ; %bb.0:
; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; CI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; CI-NEXT:    v_mov_b32_e32 v2, 3
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    ds_max_rtn_u32 v1, v1, v2 offset:8
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    s_mov_b32 s7, 0xf000
; CI-NEXT:    s_mov_b32 s6, -1
; CI-NEXT:    s_mov_b32 s4, s0
; CI-NEXT:    s_mov_b32 s5, s1
; CI-NEXT:    s_mov_b32 s0, s2
; CI-NEXT:    s_mov_b32 s1, s3
; CI-NEXT:    s_mov_b32 s2, s6
; CI-NEXT:    s_mov_b32 s3, s7
; CI-NEXT:    v_add_i32_e32 v0, vcc, 2, v0
; CI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; CI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; CI-NEXT:    s_endpgm
;
; VI-LABEL: atomic_umax_shl_base_lds_0:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; VI-NEXT:    v_mov_b32_e32 v2, 3
; VI-NEXT:    s_mov_b32 m0, -1
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    ds_max_rtn_u32 v1, v1, v2 offset:8
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_mov_b32 s0, s2
; VI-NEXT:    s_mov_b32 s1, s3
; VI-NEXT:    s_mov_b32 s2, s6
; VI-NEXT:    s_mov_b32 s3, s7
; VI-NEXT:    v_add_u32_e32 v0, vcc, 2, v0
; VI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(3) @lds2, i32 0, i32 %idx.0
  %val = atomicrmw umax ptr addrspace(3) %arrayidx0, i32 3 seq_cst
  store i32 %val, ptr addrspace(1) %out, align 4
  store i32 %idx.0, ptr addrspace(1) %add_use, align 4
  ret void
}
define amdgpu_kernel void @atomic_inc_shl_base_lds_0(ptr addrspace(1) %out, ptr addrspace(1) %add_use) #0 {
; CI-LABEL: atomic_inc_shl_base_lds_0:
; CI:       ; %bb.0:
; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; CI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; CI-NEXT:    v_mov_b32_e32 v2, 31
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    ds_inc_rtn_u32 v1, v1, v2 offset:8
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    s_mov_b32 s7, 0xf000
; CI-NEXT:    s_mov_b32 s6, -1
; CI-NEXT:    s_mov_b32 s4, s0
; CI-NEXT:    s_mov_b32 s5, s1
; CI-NEXT:    s_mov_b32 s0, s2
; CI-NEXT:    s_mov_b32 s1, s3
; CI-NEXT:    s_mov_b32 s2, s6
; CI-NEXT:    s_mov_b32 s3, s7
; CI-NEXT:    v_add_i32_e32 v0, vcc, 2, v0
; CI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; CI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; CI-NEXT:    s_endpgm
;
; VI-LABEL: atomic_inc_shl_base_lds_0:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; VI-NEXT:    v_mov_b32_e32 v2, 31
; VI-NEXT:    s_mov_b32 m0, -1
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    ds_inc_rtn_u32 v1, v1, v2 offset:8
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_mov_b32 s0, s2
; VI-NEXT:    s_mov_b32 s1, s3
; VI-NEXT:    s_mov_b32 s2, s6
; VI-NEXT:    s_mov_b32 s3, s7
; VI-NEXT:    v_add_u32_e32 v0, vcc, 2, v0
; VI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(3) @lds2, i32 0, i32 %idx.0
  %val = atomicrmw uinc_wrap ptr addrspace(3) %arrayidx0, i32 31 seq_cst
  store i32 %val, ptr addrspace(1) %out, align 4
  store i32 %idx.0, ptr addrspace(1) %add_use, align 4
  ret void
}

define amdgpu_kernel void @atomic_dec_shl_base_lds_0(ptr addrspace(1) %out, ptr addrspace(1) %add_use) #0 {
; CI-LABEL: atomic_dec_shl_base_lds_0:
; CI:       ; %bb.0:
; CI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
; CI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; CI-NEXT:    v_mov_b32_e32 v2, 31
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    ds_dec_rtn_u32 v1, v1, v2 offset:8
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    s_mov_b32 s7, 0xf000
; CI-NEXT:    s_mov_b32 s6, -1
; CI-NEXT:    s_mov_b32 s4, s0
; CI-NEXT:    s_mov_b32 s5, s1
; CI-NEXT:    s_mov_b32 s0, s2
; CI-NEXT:    s_mov_b32 s1, s3
; CI-NEXT:    s_mov_b32 s2, s6
; CI-NEXT:    s_mov_b32 s3, s7
; CI-NEXT:    v_add_i32_e32 v0, vcc, 2, v0
; CI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; CI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; CI-NEXT:    s_endpgm
;
; VI-LABEL: atomic_dec_shl_base_lds_0:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; VI-NEXT:    v_mov_b32_e32 v2, 31
; VI-NEXT:    s_mov_b32 m0, -1
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    ds_dec_rtn_u32 v1, v1, v2 offset:8
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_mov_b32 s4, s0
; VI-NEXT:    s_mov_b32 s5, s1
; VI-NEXT:    s_mov_b32 s0, s2
; VI-NEXT:    s_mov_b32 s1, s3
; VI-NEXT:    s_mov_b32 s2, s6
; VI-NEXT:    s_mov_b32 s3, s7
; VI-NEXT:    v_add_u32_e32 v0, vcc, 2, v0
; VI-NEXT:    buffer_store_dword v1, off, s[4:7], 0
; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32], ptr addrspace(3) @lds2, i32 0, i32 %idx.0
  %val = atomicrmw udec_wrap ptr addrspace(3) %arrayidx0, i32 31 seq_cst
  store i32 %val, ptr addrspace(1) %out, align 4
  store i32 %idx.0, ptr addrspace(1) %add_use, align 4
  ret void
}
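; Illustrative note (my reading of the DS instruction semantics, not checked
; by FileCheck): ds_inc_rtn_u32 computes (old >= v2) ? 0 : old + 1 and
; ds_dec_rtn_u32 computes (old == 0 || old > v2) ? v2 : old - 1, matching the
; atomicrmw uinc_wrap/udec_wrap operations above with a bound of 31.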
define void @shl_add_ptr_combine_2use_lds(i32 %idx) #0 {
; GCN-LABEL: shl_add_ptr_combine_2use_lds:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
; GCN-NEXT:    v_mov_b32_e32 v2, 9
; GCN-NEXT:    s_mov_b32 m0, -1
; GCN-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
; GCN-NEXT:    ds_write_b32 v1, v2 offset:32
; GCN-NEXT:    v_mov_b32_e32 v1, 10
; GCN-NEXT:    ds_write_b32 v0, v1 offset:64
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %idx.add = add nuw i32 %idx, 4
  %shl0 = shl i32 %idx.add, 3
  %shl1 = shl i32 %idx.add, 4
  %ptr0 = inttoptr i32 %shl0 to ptr addrspace(3)
  %ptr1 = inttoptr i32 %shl1 to ptr addrspace(3)
  store volatile i32 9, ptr addrspace(3) %ptr0
  store volatile i32 10, ptr addrspace(3) %ptr1
  ret void
}
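; Illustrative arithmetic for the test above (comment only): (idx + 4) << 3
; = (idx << 3) + 32 and (idx + 4) << 4 = (idx << 4) + 64, so each use gets
; its own shifted base and the shared +4 is folded into the two DS offsets.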
define void @shl_add_ptr_combine_2use_max_lds_offset(i32 %idx) #0 {
; CI-LABEL: shl_add_ptr_combine_2use_max_lds_offset:
; CI:       ; %bb.0:
; CI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CI-NEXT:    v_lshlrev_b32_e32 v1, 4, v0
; CI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
; CI-NEXT:    v_mov_b32_e32 v2, 9
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    v_add_i32_e32 v1, vcc, 0x1fff0, v1
; CI-NEXT:    ds_write_b32 v0, v2 offset:65528
; CI-NEXT:    v_mov_b32_e32 v0, 10
; CI-NEXT:    ds_write_b32 v1, v0
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: shl_add_ptr_combine_2use_max_lds_offset:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    v_lshlrev_b32_e32 v1, 4, v0
; VI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
; VI-NEXT:    v_mov_b32_e32 v2, 9
; VI-NEXT:    s_mov_b32 m0, -1
; VI-NEXT:    v_add_u32_e32 v1, vcc, 0x1fff0, v1
; VI-NEXT:    ds_write_b32 v0, v2 offset:65528
; VI-NEXT:    v_mov_b32_e32 v0, 10
; VI-NEXT:    ds_write_b32 v1, v0
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_setpc_b64 s[30:31]
  %idx.add = add nuw i32 %idx, 8191
  %shl0 = shl i32 %idx.add, 3
  %shl1 = shl i32 %idx.add, 4
  %ptr0 = inttoptr i32 %shl0 to ptr addrspace(3)
  %ptr1 = inttoptr i32 %shl1 to ptr addrspace(3)
  store volatile i32 9, ptr addrspace(3) %ptr0
  store volatile i32 10, ptr addrspace(3) %ptr1
  ret void
}

define void @shl_add_ptr_combine_2use_both_max_lds_offset(i32 %idx) #0 {
; CI-LABEL: shl_add_ptr_combine_2use_both_max_lds_offset:
; CI:       ; %bb.0:
; CI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CI-NEXT:    v_add_i32_e32 v0, vcc, 0x1000, v0
; CI-NEXT:    v_lshlrev_b32_e32 v1, 4, v0
; CI-NEXT:    v_mov_b32_e32 v2, 9
; CI-NEXT:    s_mov_b32 m0, -1
; CI-NEXT:    v_lshlrev_b32_e32 v0, 5, v0
; CI-NEXT:    ds_write_b32 v1, v2
; CI-NEXT:    v_mov_b32_e32 v1, 10
; CI-NEXT:    ds_write_b32 v0, v1
; CI-NEXT:    s_waitcnt lgkmcnt(0)
; CI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: shl_add_ptr_combine_2use_both_max_lds_offset:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    v_add_u32_e32 v0, vcc, 0x1000, v0
; VI-NEXT:    v_lshlrev_b32_e32 v1, 4, v0
; VI-NEXT:    v_mov_b32_e32 v2, 9
; VI-NEXT:    s_mov_b32 m0, -1
; VI-NEXT:    v_lshlrev_b32_e32 v0, 5, v0
; VI-NEXT:    ds_write_b32 v1, v2
; VI-NEXT:    v_mov_b32_e32 v1, 10
; VI-NEXT:    ds_write_b32 v0, v1
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_setpc_b64 s[30:31]
  %idx.add = add nuw i32 %idx, 4096
  %shl0 = shl i32 %idx.add, 4
  %shl1 = shl i32 %idx.add, 5
  %ptr0 = inttoptr i32 %shl0 to ptr addrspace(3)
  %ptr1 = inttoptr i32 %shl1 to ptr addrspace(3)
  store volatile i32 9, ptr addrspace(3) %ptr0
  store volatile i32 10, ptr addrspace(3) %ptr1
  ret void
}

define void @shl_add_ptr_combine_2use_private(i16 zeroext %idx.arg) #0 {
; GCN-LABEL: shl_add_ptr_combine_2use_private:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
; GCN-NEXT:    v_mov_b32_e32 v2, 9
; GCN-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
; GCN-NEXT:    buffer_store_dword v2, v1, s[0:3], 0 offen offset:16
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_mov_b32_e32 v1, 10
; GCN-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen offset:32
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %idx = zext i16 %idx.arg to i32
  %idx.add = add nuw i32 %idx, 4
  %shl0 = shl i32 %idx.add, 2
  %shl1 = shl i32 %idx.add, 3
  %ptr0 = inttoptr i32 %shl0 to ptr addrspace(5)
  %ptr1 = inttoptr i32 %shl1 to ptr addrspace(5)
  store volatile i32 9, ptr addrspace(5) %ptr0
  store volatile i32 10, ptr addrspace(5) %ptr1
  ret void
}

define void @shl_add_ptr_combine_2use_max_private_offset(i16 zeroext %idx.arg) #0 {
; CI-LABEL: shl_add_ptr_combine_2use_max_private_offset:
; CI:       ; %bb.0:
; CI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CI-NEXT:    v_lshlrev_b32_e32 v1, 4, v0
; CI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
; CI-NEXT:    v_mov_b32_e32 v2, 9
; CI-NEXT:    v_add_i32_e32 v1, vcc, 0x1ff0, v1
; CI-NEXT:    buffer_store_dword v2, v0, s[0:3], 0 offen offset:4088
; CI-NEXT:    s_waitcnt vmcnt(0)
; CI-NEXT:    v_mov_b32_e32 v0, 10
; CI-NEXT:    buffer_store_dword v0, v1, s[0:3], 0 offen
; CI-NEXT:    s_waitcnt vmcnt(0)
; CI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: shl_add_ptr_combine_2use_max_private_offset:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    v_lshlrev_b32_e32 v1, 4, v0
; VI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
; VI-NEXT:    v_mov_b32_e32 v2, 9
; VI-NEXT:    v_add_u32_e32 v1, vcc, 0x1ff0, v1
; VI-NEXT:    buffer_store_dword v2, v0, s[0:3], 0 offen offset:4088
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_mov_b32_e32 v0, 10
; VI-NEXT:    buffer_store_dword v0, v1, s[0:3], 0 offen
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    s_setpc_b64 s[30:31]
  %idx = zext i16 %idx.arg to i32
  %idx.add = add nuw i32 %idx, 511
  %shl0 = shl i32 %idx.add, 3
  %shl1 = shl i32 %idx.add, 4
  %ptr0 = inttoptr i32 %shl0 to ptr addrspace(5)
  %ptr1 = inttoptr i32 %shl1 to ptr addrspace(5)
  store volatile i32 9, ptr addrspace(5) %ptr0
  store volatile i32 10, ptr addrspace(5) %ptr1
  ret void
}
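; Illustrative note: MUBUF instructions have a 12-bit immediate offset, so
; 4095 is the largest value that can be folded for private (scratch) access.
; Above, 511 << 3 = 4088 still fits, while 511 << 4 = 0x1ff0 (8176) does not
; and is materialized with a v_add instead.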
define void @shl_add_ptr_combine_2use_both_max_private_offset(i16 zeroext %idx.arg) #0 {
; CI-LABEL: shl_add_ptr_combine_2use_both_max_private_offset:
; CI:       ; %bb.0:
; CI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CI-NEXT:    v_add_i32_e32 v0, vcc, 0x100, v0
; CI-NEXT:    v_lshlrev_b32_e32 v1, 4, v0
; CI-NEXT:    v_mov_b32_e32 v2, 9
; CI-NEXT:    v_lshlrev_b32_e32 v0, 5, v0
; CI-NEXT:    buffer_store_dword v2, v1, s[0:3], 0 offen
; CI-NEXT:    s_waitcnt vmcnt(0)
; CI-NEXT:    v_mov_b32_e32 v1, 10
; CI-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen
; CI-NEXT:    s_waitcnt vmcnt(0)
; CI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: shl_add_ptr_combine_2use_both_max_private_offset:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    v_add_u32_e32 v0, vcc, 0x100, v0
; VI-NEXT:    v_lshlrev_b32_e32 v1, 4, v0
; VI-NEXT:    v_mov_b32_e32 v2, 9
; VI-NEXT:    v_lshlrev_b32_e32 v0, 5, v0
; VI-NEXT:    buffer_store_dword v2, v1, s[0:3], 0 offen
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_mov_b32_e32 v1, 10
; VI-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    s_setpc_b64 s[30:31]
  %idx = zext i16 %idx.arg to i32
  %idx.add = add nuw i32 %idx, 256
  %shl0 = shl i32 %idx.add, 4
  %shl1 = shl i32 %idx.add, 5
  %ptr0 = inttoptr i32 %shl0 to ptr addrspace(5)
  %ptr1 = inttoptr i32 %shl1 to ptr addrspace(5)
  store volatile i32 9, ptr addrspace(5) %ptr0
  store volatile i32 10, ptr addrspace(5) %ptr1
  ret void
}

define void @shl_or_ptr_combine_2use_lds(i32 %idx) #0 {
; GCN-LABEL: shl_or_ptr_combine_2use_lds:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
; GCN-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
; GCN-NEXT:    v_mov_b32_e32 v2, 9
; GCN-NEXT:    s_mov_b32 m0, -1
; GCN-NEXT:    v_lshlrev_b32_e32 v1, 4, v1
; GCN-NEXT:    ds_write_b32 v0, v2 offset:8
; GCN-NEXT:    v_mov_b32_e32 v0, 10
; GCN-NEXT:    ds_write_b32 v1, v0 offset:16
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %idx.shl = shl i32 %idx, 1
  %idx.add = or i32 %idx.shl, 1
  %shl0 = shl i32 %idx.add, 3
  %shl1 = shl i32 %idx.add, 4
  %ptr0 = inttoptr i32 %shl0 to ptr addrspace(3)
  %ptr1 = inttoptr i32 %shl1 to ptr addrspace(3)
  store volatile i32 9, ptr addrspace(3) %ptr0
  store volatile i32 10, ptr addrspace(3) %ptr1
  ret void
}

define void @shl_or_ptr_not_combine_2use_lds(i32 %idx) #0 {
; GCN-LABEL: shl_or_ptr_not_combine_2use_lds:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_or_b32_e32 v0, 1, v0
; GCN-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
; GCN-NEXT:    v_mov_b32_e32 v2, 9
; GCN-NEXT:    s_mov_b32 m0, -1
; GCN-NEXT:    v_lshlrev_b32_e32 v0, 4, v0
; GCN-NEXT:    ds_write_b32 v1, v2
; GCN-NEXT:    v_mov_b32_e32 v1, 10
; GCN-NEXT:    ds_write_b32 v0, v1
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %idx.add = or i32 %idx, 1
  %shl0 = shl i32 %idx.add, 3
  %shl1 = shl i32 %idx.add, 4
  %ptr0 = inttoptr i32 %shl0 to ptr addrspace(3)
  %ptr1 = inttoptr i32 %shl1 to ptr addrspace(3)
  store volatile i32 9, ptr addrspace(3) %ptr0
  store volatile i32 10, ptr addrspace(3) %ptr1
  ret void
}

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }