Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll  16
1 file changed, 3 insertions, 13 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll b/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll
index 64392a1..735720a 100644
--- a/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll
+++ b/llvm/test/CodeGen/AMDGPU/scale-offset-flat.ll
@@ -337,21 +337,18 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg %
; SDAG-LABEL: flat_atomicrmw_b64_rtn_idxprom:
; SDAG: ; %bb.0: ; %entry
; SDAG-NEXT: v_ashrrev_i32_e32 v1, 31, v0
-; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
; SDAG-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 3, s[0:1]
; SDAG-NEXT: s_mov_b64 s[0:1], src_private_base
; SDAG-NEXT: s_mov_b32 s0, exec_lo
; SDAG-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SDAG-NEXT: s_wait_alu 0xfffe
; SDAG-NEXT: v_cmpx_ne_u32_e64 s1, v3
; SDAG-NEXT: s_xor_b32 s0, exec_lo, s0
; SDAG-NEXT: s_cbranch_execnz .LBB21_3
; SDAG-NEXT: ; %bb.1: ; %Flow
-; SDAG-NEXT: s_wait_alu 0xfffe
; SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; SDAG-NEXT: s_cbranch_execnz .LBB21_4
; SDAG-NEXT: .LBB21_2: ; %atomicrmw.phi
-; SDAG-NEXT: s_wait_alu 0xfffe
; SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
; SDAG-NEXT: s_branch .LBB21_5
@@ -360,7 +357,6 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg %
; SDAG-NEXT: flat_atomic_add_u64 v[0:1], v[2:3], v[0:1] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
; SDAG-NEXT: ; implicit-def: $vgpr2_vgpr3
; SDAG-NEXT: s_wait_xcnt 0x0
-; SDAG-NEXT: s_wait_alu 0xfffe
; SDAG-NEXT: s_and_not1_saveexec_b32 s0, s0
; SDAG-NEXT: s_cbranch_execz .LBB21_2
; SDAG-NEXT: .LBB21_4: ; %atomicrmw.private
@@ -369,10 +365,9 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg %
; SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
; SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; SDAG-NEXT: s_wait_loadcnt 0x0
-; SDAG-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 0, 1
+; SDAG-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1]
; SDAG-NEXT: scratch_store_b64 v4, v[2:3], off
; SDAG-NEXT: s_wait_xcnt 0x0
-; SDAG-NEXT: s_wait_alu 0xfffe
; SDAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
; SDAG-NEXT: s_branch .LBB21_5
; SDAG-NEXT: .LBB21_5:
@@ -395,11 +390,9 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg %
; GISEL-NEXT: s_xor_b32 s2, exec_lo, s2
; GISEL-NEXT: s_cbranch_execnz .LBB21_3
; GISEL-NEXT: ; %bb.1: ; %Flow
-; GISEL-NEXT: s_wait_alu 0xfffe
; GISEL-NEXT: s_and_not1_saveexec_b32 s0, s2
; GISEL-NEXT: s_cbranch_execnz .LBB21_4
; GISEL-NEXT: .LBB21_2: ; %atomicrmw.phi
-; GISEL-NEXT: s_wait_alu 0xfffe
; GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GISEL-NEXT: s_branch .LBB21_5
@@ -408,20 +401,17 @@ define amdgpu_ps <2 x float> @flat_atomicrmw_b64_rtn_idxprom(ptr align 8 inreg %
; GISEL-NEXT: ; implicit-def: $vgpr4_vgpr5
; GISEL-NEXT: flat_atomic_add_u64 v[0:1], v2, v[0:1], s[0:1] scale_offset th:TH_ATOMIC_RETURN scope:SCOPE_SYS
; GISEL-NEXT: s_wait_xcnt 0x0
-; GISEL-NEXT: s_wait_alu 0xfffe
; GISEL-NEXT: s_and_not1_saveexec_b32 s0, s2
; GISEL-NEXT: s_cbranch_execz .LBB21_2
; GISEL-NEXT: .LBB21_4: ; %atomicrmw.private
; GISEL-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[4:5]
-; GISEL-NEXT: s_wait_alu 0xfffd
; GISEL-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc_lo
; GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GISEL-NEXT: scratch_load_b64 v[0:1], v4, off
; GISEL-NEXT: s_wait_loadcnt 0x0
-; GISEL-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 0, 1
+; GISEL-NEXT: v_add_nc_u64_e32 v[2:3], 1, v[0:1]
; GISEL-NEXT: scratch_store_b64 v4, v[2:3], off
; GISEL-NEXT: s_wait_xcnt 0x0
-; GISEL-NEXT: s_wait_alu 0xfffe
; GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GISEL-NEXT: s_branch .LBB21_5
; GISEL-NEXT: .LBB21_5: