Diffstat (limited to 'llvm/test/CodeGen/AMDGPU')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll                 |   98
-rw-r--r--  llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll               |   24
-rw-r--r--  llvm/test/CodeGen/AMDGPU/addsub64_carry.ll                     |  192
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgpu-attributor-min-agpr-alloc.ll (renamed from llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll) | 374
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll  |  452
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll   |  790
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll |   57
-rw-r--r--  llvm/test/CodeGen/AMDGPU/carryout-selection.ll                 |  322
-rw-r--r--  llvm/test/CodeGen/AMDGPU/mad_int24.ll                          |  313
-rw-r--r--  llvm/test/CodeGen/AMDGPU/mad_uint24.ll                         | 1492
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sdiv64.ll                             |  470
-rw-r--r--  llvm/test/CodeGen/AMDGPU/srem64.ll                             |  488
-rw-r--r--  llvm/test/CodeGen/AMDGPU/uaddo.ll                              |  201
-rw-r--r--  llvm/test/CodeGen/AMDGPU/uaddsat.ll                            |   47
-rw-r--r--  llvm/test/CodeGen/AMDGPU/udiv64.ll                             |  472
-rw-r--r--  llvm/test/CodeGen/AMDGPU/urem64.ll                             |  379
-rw-r--r--  llvm/test/CodeGen/AMDGPU/usubo.ll                              |  201
-rw-r--r--  llvm/test/CodeGen/AMDGPU/usubsat.ll                            |   54
18 files changed, 4788 insertions(+), 1638 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
index 7cc5051..003aa04 100644
--- a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
+++ b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
@@ -8759,9 +8759,8 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v6
; GFX90A-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v7, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
-; GFX90A-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX90A-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX90A-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -8780,20 +8779,19 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
; GFX90A-NEXT: s_cbranch_execz .LBB113_6
; GFX90A-NEXT: ; %bb.5: ; %atomicrmw.private
; GFX90A-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
-; GFX90A-NEXT: buffer_load_dword v0, v4, s[0:3], 0 offen
-; GFX90A-NEXT: buffer_load_dword v1, v4, s[0:3], 0 offen offset:4
+; GFX90A-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc
+; GFX90A-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen
+; GFX90A-NEXT: buffer_load_dword v2, v0, s[0:3], 0 offen offset:4
; GFX90A-NEXT: s_waitcnt vmcnt(1)
-; GFX90A-NEXT: v_sub_co_u32_e32 v2, vcc, v0, v6
+; GFX90A-NEXT: v_sub_co_u32_e32 v3, vcc, v1, v6
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_subb_co_u32_e32 v3, vcc, v1, v7, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0
-; GFX90A-NEXT: v_cndmask_b32_e64 v0, v3, 0, vcc
-; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1
-; GFX90A-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX90A-NEXT: buffer_store_dword v0, v4, s[0:3], 0 offen offset:4
-; GFX90A-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen
+; GFX90A-NEXT: v_subb_co_u32_e32 v4, vcc, v2, v7, vcc
+; GFX90A-NEXT: v_accvgpr_write_b32 a0, v1
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX90A-NEXT: v_accvgpr_write_b32 a1, v2
+; GFX90A-NEXT: v_cndmask_b32_e64 v1, v4, 0, vcc
+; GFX90A-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen
+; GFX90A-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:4
; GFX90A-NEXT: .LBB113_6: ; %atomicrmw.phi
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: ;;#ASMSTART
@@ -8827,10 +8825,9 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
; GFX950-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v6
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v7, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
; GFX950-NEXT: s_nop 1
-; GFX950-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX950-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX950-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] sc0
; GFX950-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -8856,11 +8853,11 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
; GFX950-NEXT: v_sub_co_u32_e32 v2, vcc, v0, v6
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v3, vcc, v1, v7, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
; GFX950-NEXT: v_accvgpr_write_b32 a0, v0
-; GFX950-NEXT: v_accvgpr_write_b32 a1, v1
+; GFX950-NEXT: s_nop 0
; GFX950-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX950-NEXT: v_accvgpr_write_b32 a1, v1
; GFX950-NEXT: scratch_store_dwordx2 v4, v[2:3], off
; GFX950-NEXT: .LBB113_6: ; %atomicrmw.phi
; GFX950-NEXT: s_or_b64 exec, exec, s[0:1]
@@ -8900,9 +8897,8 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1]
; GFX90A-NEXT: v_sub_co_u32_e32 v4, vcc, v6, v2
; GFX90A-NEXT: v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[4:5], v[6:7]
-; GFX90A-NEXT: v_cndmask_b32_e64 v5, v5, 0, vcc
; GFX90A-NEXT: v_cndmask_b32_e64 v4, v4, 0, vcc
+; GFX90A-NEXT: v_cndmask_b32_e64 v5, v5, 0, vcc
; GFX90A-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
@@ -8918,18 +8914,17 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
; GFX90A-NEXT: s_cbranch_execz .LBB114_6
; GFX90A-NEXT: ; %bb.5: ; %atomicrmw.private
; GFX90A-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; GFX90A-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc
-; GFX90A-NEXT: buffer_load_dword v4, v6, s[0:3], 0 offen
-; GFX90A-NEXT: buffer_load_dword v5, v6, s[0:3], 0 offen offset:4
+; GFX90A-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
+; GFX90A-NEXT: buffer_load_dword v4, v0, s[0:3], 0 offen
+; GFX90A-NEXT: buffer_load_dword v5, v0, s[0:3], 0 offen offset:4
; GFX90A-NEXT: s_waitcnt vmcnt(1)
-; GFX90A-NEXT: v_sub_co_u32_e32 v0, vcc, v4, v2
+; GFX90A-NEXT: v_sub_co_u32_e32 v1, vcc, v4, v2
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_subb_co_u32_e32 v1, vcc, v5, v3, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[4:5]
-; GFX90A-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT: v_subb_co_u32_e32 v2, vcc, v5, v3, vcc
; GFX90A-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
-; GFX90A-NEXT: buffer_store_dword v0, v6, s[0:3], 0 offen
-; GFX90A-NEXT: buffer_store_dword v1, v6, s[0:3], 0 offen offset:4
+; GFX90A-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX90A-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; GFX90A-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
; GFX90A-NEXT: .LBB114_6: ; %atomicrmw.phi
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: ;;#ASMSTART
@@ -8962,10 +8957,9 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
; GFX950-NEXT: v_sub_co_u32_e32 v2, vcc, v8, v0
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
; GFX950-NEXT: s_nop 1
-; GFX950-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX950-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc
; GFX950-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[4:5], v[6:9] sc0
; GFX950-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -8988,7 +8982,6 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
; GFX950-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v0
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v1, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
@@ -17064,9 +17057,8 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v4
; GFX90A-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
-; GFX90A-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX90A-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX90A-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[6:7], v[0:3] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -17085,20 +17077,19 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
; GFX90A-NEXT: ; %bb.5: ; %atomicrmw.private
; GFX90A-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX90A-NEXT: s_cselect_b32 s4, s4, -1
-; GFX90A-NEXT: v_mov_b32_e32 v6, s4
-; GFX90A-NEXT: buffer_load_dword v0, v6, s[0:3], 0 offen
-; GFX90A-NEXT: buffer_load_dword v1, v6, s[0:3], 0 offen offset:4
+; GFX90A-NEXT: v_mov_b32_e32 v0, s4
+; GFX90A-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen
+; GFX90A-NEXT: buffer_load_dword v2, v0, s[0:3], 0 offen offset:4
; GFX90A-NEXT: s_waitcnt vmcnt(1)
-; GFX90A-NEXT: v_sub_co_u32_e32 v2, vcc, v0, v4
+; GFX90A-NEXT: v_sub_co_u32_e32 v3, vcc, v1, v4
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_subb_co_u32_e32 v3, vcc, v1, v5, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0
-; GFX90A-NEXT: v_cndmask_b32_e64 v0, v3, 0, vcc
-; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1
-; GFX90A-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX90A-NEXT: buffer_store_dword v0, v6, s[0:3], 0 offen offset:4
-; GFX90A-NEXT: buffer_store_dword v2, v6, s[0:3], 0 offen
+; GFX90A-NEXT: v_subb_co_u32_e32 v4, vcc, v2, v5, vcc
+; GFX90A-NEXT: v_accvgpr_write_b32 a0, v1
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX90A-NEXT: v_accvgpr_write_b32 a1, v2
+; GFX90A-NEXT: v_cndmask_b32_e64 v1, v4, 0, vcc
+; GFX90A-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen
+; GFX90A-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:4
; GFX90A-NEXT: .LBB221_6: ; %atomicrmw.phi
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; use a[0:1]
@@ -17131,10 +17122,9 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
; GFX950-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v4
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
; GFX950-NEXT: s_nop 1
-; GFX950-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX950-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX950-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[6:7], v[0:3] sc0
; GFX950-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -17158,11 +17148,11 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
; GFX950-NEXT: v_sub_co_u32_e32 v2, vcc, v0, v4
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v3, vcc, v1, v5, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
; GFX950-NEXT: v_accvgpr_write_b32 a0, v0
-; GFX950-NEXT: v_accvgpr_write_b32 a1, v1
+; GFX950-NEXT: s_nop 0
; GFX950-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX950-NEXT: v_accvgpr_write_b32 a1, v1
; GFX950-NEXT: scratch_store_dwordx2 off, v[2:3], s0
; GFX950-NEXT: .LBB221_6: ; %atomicrmw.phi
; GFX950-NEXT: ;;#ASMSTART
@@ -17201,9 +17191,8 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
; GFX90A-NEXT: v_pk_mov_b32 v[8:9], v[2:3], v[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_sub_co_u32_e32 v2, vcc, v8, v0
; GFX90A-NEXT: v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
-; GFX90A-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc
; GFX90A-NEXT: v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX90A-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc
; GFX90A-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[4:5], v[6:9] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -17226,7 +17215,6 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
; GFX90A-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v0
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v1, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
; GFX90A-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
; GFX90A-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX90A-NEXT: buffer_store_dword v0, v4, s[0:3], 0 offen
@@ -17262,10 +17250,9 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
; GFX950-NEXT: v_sub_co_u32_e32 v2, vcc, v8, v0
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
; GFX950-NEXT: s_nop 1
-; GFX950-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX950-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc
; GFX950-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[4:5], v[6:9] sc0
; GFX950-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -17286,7 +17273,6 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
; GFX950-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v0
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v1, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
diff --git a/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll b/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll
index c98fff9..34a4899 100644
--- a/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll
+++ b/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll
@@ -5804,9 +5804,8 @@ define void @global_atomic_usub_sat_i64_ret_a_a(ptr addrspace(1) %ptr) #0 {
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_sub_co_u32_e32 v2, vcc, v4, v6
; GFX90A-NEXT: v_subb_co_u32_e32 v3, vcc, v5, v7, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5]
-; GFX90A-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
; GFX90A-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
@@ -5839,10 +5838,9 @@ define void @global_atomic_usub_sat_i64_ret_a_a(ptr addrspace(1) %ptr) #0 {
; GFX950-NEXT: v_sub_co_u32_e32 v2, vcc, v4, v6
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v3, vcc, v5, v7, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5]
; GFX950-NEXT: s_nop 1
-; GFX950-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX950-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
; GFX950-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 sc0
; GFX950-NEXT: s_waitcnt vmcnt(0)
; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
@@ -5880,9 +5878,8 @@ define void @global_atomic_usub_sat_i64_ret_av_av(ptr addrspace(1) %ptr) #0 {
; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1]
; GFX90A-NEXT: v_sub_co_u32_e32 v4, vcc, v6, v2
; GFX90A-NEXT: v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[4:5], v[6:7]
-; GFX90A-NEXT: v_cndmask_b32_e64 v5, v5, 0, vcc
; GFX90A-NEXT: v_cndmask_b32_e64 v4, v4, 0, vcc
+; GFX90A-NEXT: v_cndmask_b32_e64 v5, v5, 0, vcc
; GFX90A-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:80 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
@@ -5911,10 +5908,9 @@ define void @global_atomic_usub_sat_i64_ret_av_av(ptr addrspace(1) %ptr) #0 {
; GFX950-NEXT: v_sub_co_u32_e32 v4, vcc, v6, v2
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[4:5], v[6:7]
; GFX950-NEXT: s_nop 1
-; GFX950-NEXT: v_cndmask_b32_e64 v5, v5, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v4, v4, 0, vcc
+; GFX950-NEXT: v_cndmask_b32_e64 v5, v5, 0, vcc
; GFX950-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:80 sc0
; GFX950-NEXT: s_waitcnt vmcnt(0)
; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
@@ -11573,9 +11569,8 @@ define void @global_atomic_usub_sat_i64_saddr_ret_a_a(ptr addrspace(1) inreg %pt
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v4
; GFX90A-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
-; GFX90A-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX90A-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[16:17] offset:80 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -11609,10 +11604,9 @@ define void @global_atomic_usub_sat_i64_saddr_ret_a_a(ptr addrspace(1) inreg %pt
; GFX950-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v4
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
; GFX950-NEXT: s_nop 1
-; GFX950-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX950-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX950-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] offset:80 sc0
; GFX950-NEXT: s_waitcnt vmcnt(0)
; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -11651,9 +11645,8 @@ define void @global_atomic_usub_sat_i64_saddr_ret_av_av(ptr addrspace(1) inreg %
; GFX90A-NEXT: v_pk_mov_b32 v[8:9], v[2:3], v[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_sub_co_u32_e32 v2, vcc, v8, v0
; GFX90A-NEXT: v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
-; GFX90A-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc
; GFX90A-NEXT: v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX90A-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc
; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[16:17] offset:80 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -11683,10 +11676,9 @@ define void @global_atomic_usub_sat_i64_saddr_ret_av_av(ptr addrspace(1) inreg %
; GFX950-NEXT: v_sub_co_u32_e32 v2, vcc, v8, v0
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
; GFX950-NEXT: s_nop 1
-; GFX950-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX950-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc
; GFX950-NEXT: global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[0:1] offset:80 sc0
; GFX950-NEXT: s_waitcnt vmcnt(0)
; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
diff --git a/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
index d326966..b72eba8 100644
--- a/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
+++ b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
@@ -17,12 +17,9 @@ define %struct.uint96 @v_add64_32(i64 %val64A, i64 %val64B, i32 %val32) {
; CHECK-LABEL: v_add64_32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_add_co_u32_e32 v5, vcc, v0, v2
-; CHECK-NEXT: v_addc_co_u32_e32 v6, vcc, v1, v3, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, v[5:6], v[0:1]
-; CHECK-NEXT: v_mov_b32_e32 v0, v5
+; CHECK-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; CHECK-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
; CHECK-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v4, vcc
-; CHECK-NEXT: v_mov_b32_e32 v1, v6
; CHECK-NEXT: s_setpc_b64 s[30:31]
%sum64 = add i64 %val64A, %val64B
%obit = icmp ult i64 %sum64, %val64A
@@ -38,16 +35,14 @@ define <2 x i64> @v_uadd_v2i64(<2 x i64> %val0, <2 x i64> %val1, ptr %ptrval) {
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, v2, v6
+; CHECK-NEXT: v_add_co_u32_e64 v4, s[4:5], v0, v4
; CHECK-NEXT: v_addc_co_u32_e32 v7, vcc, v3, v7, vcc
-; CHECK-NEXT: v_add_co_u32_e32 v4, vcc, v0, v4
-; CHECK-NEXT: v_addc_co_u32_e32 v5, vcc, v1, v5, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[0:1]
-; CHECK-NEXT: flat_store_dwordx4 v[8:9], v[4:7]
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
-; CHECK-NEXT: v_mov_b32_e32 v1, v0
+; CHECK-NEXT: v_addc_co_u32_e64 v5, s[4:5], v1, v5, s[4:5]
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; CHECK-NEXT: v_mov_b32_e32 v1, v0
; CHECK-NEXT: v_mov_b32_e32 v3, v2
+; CHECK-NEXT: flat_store_dwordx4 v[8:9], v[4:7]
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
%pair = call {<2 x i64>, <2 x i1>} @llvm.uadd.with.overflow.v2i64(<2 x i64> %val0, <2 x i64> %val1)
@@ -63,16 +58,14 @@ define <2 x i64> @v_usub_v2i64(<2 x i64> %val0, <2 x i64> %val1, ptr %ptrval) {
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_sub_co_u32_e32 v6, vcc, v2, v6
+; CHECK-NEXT: v_sub_co_u32_e64 v4, s[4:5], v0, v4
; CHECK-NEXT: v_subb_co_u32_e32 v7, vcc, v3, v7, vcc
-; CHECK-NEXT: v_sub_co_u32_e32 v4, vcc, v0, v4
-; CHECK-NEXT: v_subb_co_u32_e32 v5, vcc, v1, v5, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, v[4:5], v[0:1]
-; CHECK-NEXT: flat_store_dwordx4 v[8:9], v[4:7]
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
-; CHECK-NEXT: v_mov_b32_e32 v1, v0
+; CHECK-NEXT: v_subb_co_u32_e64 v5, s[4:5], v1, v5, s[4:5]
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; CHECK-NEXT: v_mov_b32_e32 v1, v0
; CHECK-NEXT: v_mov_b32_e32 v3, v2
+; CHECK-NEXT: flat_store_dwordx4 v[8:9], v[4:7]
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
%pair = call {<2 x i64>, <2 x i1>} @llvm.usub.with.overflow.v2i64(<2 x i64> %val0, <2 x i64> %val1)
@@ -87,10 +80,9 @@ define i64 @v_uadd_i64(i64 %val0, i64 %val1, ptr %ptrval) {
; CHECK-LABEL: v_uadd_i64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_add_co_u32_e32 v2, vcc, v0, v2
-; CHECK-NEXT: v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; CHECK-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; CHECK-NEXT: v_mov_b32_e32 v1, v0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -109,7 +101,6 @@ define i64 @v_uadd_p1(i64 %val0, i64 %val1, ptr %ptrval) {
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
; CHECK-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; CHECK-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; CHECK-NEXT: v_mov_b32_e32 v1, v0
@@ -147,10 +138,9 @@ define i64 @v_usub_p1(i64 %val0, i64 %val1, ptr %ptrval) {
; CHECK-LABEL: v_usub_p1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_add_co_u32_e32 v2, vcc, -1, v0
-; CHECK-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v1, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT: v_subrev_co_u32_e32 v0, vcc, 1, v0
+; CHECK-NEXT: v_subbrev_co_u32_e32 v1, vcc, 0, v1, vcc
+; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; CHECK-NEXT: v_mov_b32_e32 v1, v0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -167,10 +157,9 @@ define i64 @v_usub_n1(i64 %val0, i64 %val1, ptr %ptrval) {
; CHECK-LABEL: v_usub_n1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_add_co_u32_e32 v2, vcc, 1, v0
-; CHECK-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT: v_subrev_co_u32_e32 v0, vcc, -1, v0
+; CHECK-NEXT: v_subbrev_co_u32_e32 v1, vcc, -1, v1, vcc
+; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; CHECK-NEXT: v_mov_b32_e32 v1, v0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -190,15 +179,13 @@ define i64 @v_usub_n1(i64 %val0, i64 %val1, ptr %ptrval) {
define amdgpu_ps %struct.uint96 @s_add64_32(i64 inreg %val64A, i64 inreg %val64B, i32 inreg %val32) {
; CHECK-LABEL: s_add64_32:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_add_u32 s6, s0, s2
-; CHECK-NEXT: v_mov_b32_e32 v0, s0
-; CHECK-NEXT: s_addc_u32 s7, s1, s3
-; CHECK-NEXT: v_mov_b32_e32 v1, s1
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; CHECK-NEXT: s_mov_b32 s0, s6
-; CHECK-NEXT: s_cmp_lg_u64 vcc, 0
+; CHECK-NEXT: s_add_u32 s0, s0, s2
+; CHECK-NEXT: s_cselect_b64 s[6:7], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[6:7], 0
+; CHECK-NEXT: s_addc_u32 s1, s1, s3
+; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[2:3], 0
; CHECK-NEXT: s_addc_u32 s2, s4, 0
-; CHECK-NEXT: s_mov_b32 s1, s7
; CHECK-NEXT: ; return to shader part epilog
%sum64 = add i64 %val64A, %val64B
%obit = icmp ult i64 %sum64, %val64A
@@ -212,24 +199,24 @@ define amdgpu_ps %struct.uint96 @s_add64_32(i64 inreg %val64A, i64 inreg %val64B
define amdgpu_ps <2 x i64> @s_uadd_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_uadd_v2i64:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_add_u32 s6, s2, s6
-; CHECK-NEXT: v_mov_b32_e32 v9, s3
-; CHECK-NEXT: s_addc_u32 s7, s3, s7
-; CHECK-NEXT: v_mov_b32_e32 v8, s2
-; CHECK-NEXT: s_add_u32 s4, s0, s4
-; CHECK-NEXT: v_mov_b32_e32 v7, s1
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[8:9]
-; CHECK-NEXT: s_addc_u32 s5, s1, s5
-; CHECK-NEXT: v_mov_b32_e32 v6, s0
-; CHECK-NEXT: v_cndmask_b32_e64 v8, 0, -1, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[6:7]
-; CHECK-NEXT: v_readfirstlane_b32 s2, v8
-; CHECK-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc
-; CHECK-NEXT: v_readfirstlane_b32 s0, v6
-; CHECK-NEXT: v_mov_b32_e32 v2, s4
-; CHECK-NEXT: v_mov_b32_e32 v3, s5
-; CHECK-NEXT: v_mov_b32_e32 v4, s6
-; CHECK-NEXT: v_mov_b32_e32 v5, s7
+; CHECK-NEXT: s_add_u32 s10, s2, s6
+; CHECK-NEXT: s_cselect_b64 s[8:9], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[8:9], 0
+; CHECK-NEXT: s_addc_u32 s8, s3, s7
+; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT: s_add_u32 s0, s0, s4
+; CHECK-NEXT: s_cselect_b64 s[6:7], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[6:7], 0
+; CHECK-NEXT: s_addc_u32 s1, s1, s5
+; CHECK-NEXT: v_mov_b32_e32 v2, s0
+; CHECK-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[2:3]
+; CHECK-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1]
+; CHECK-NEXT: v_readfirstlane_b32 s0, v7
+; CHECK-NEXT: v_readfirstlane_b32 s2, v6
+; CHECK-NEXT: v_mov_b32_e32 v4, s10
+; CHECK-NEXT: v_mov_b32_e32 v5, s8
; CHECK-NEXT: s_mov_b32 s1, s0
; CHECK-NEXT: s_mov_b32 s3, s2
; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
@@ -246,24 +233,24 @@ define amdgpu_ps <2 x i64> @s_uadd_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg
define amdgpu_ps <2 x i64> @s_usub_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_usub_v2i64:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_sub_u32 s6, s2, s6
-; CHECK-NEXT: v_mov_b32_e32 v9, s3
-; CHECK-NEXT: s_subb_u32 s7, s3, s7
-; CHECK-NEXT: v_mov_b32_e32 v8, s2
-; CHECK-NEXT: s_sub_u32 s4, s0, s4
-; CHECK-NEXT: v_mov_b32_e32 v7, s1
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, s[6:7], v[8:9]
-; CHECK-NEXT: s_subb_u32 s5, s1, s5
-; CHECK-NEXT: v_mov_b32_e32 v6, s0
-; CHECK-NEXT: v_cndmask_b32_e64 v8, 0, -1, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, s[4:5], v[6:7]
-; CHECK-NEXT: v_readfirstlane_b32 s2, v8
-; CHECK-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc
-; CHECK-NEXT: v_readfirstlane_b32 s0, v6
-; CHECK-NEXT: v_mov_b32_e32 v2, s4
-; CHECK-NEXT: v_mov_b32_e32 v3, s5
-; CHECK-NEXT: v_mov_b32_e32 v4, s6
-; CHECK-NEXT: v_mov_b32_e32 v5, s7
+; CHECK-NEXT: s_sub_u32 s10, s2, s6
+; CHECK-NEXT: s_cselect_b64 s[8:9], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[8:9], 0
+; CHECK-NEXT: s_subb_u32 s8, s3, s7
+; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT: s_sub_u32 s0, s0, s4
+; CHECK-NEXT: s_cselect_b64 s[6:7], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[6:7], 0
+; CHECK-NEXT: s_subb_u32 s1, s1, s5
+; CHECK-NEXT: v_mov_b32_e32 v2, s0
+; CHECK-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[2:3]
+; CHECK-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1]
+; CHECK-NEXT: v_readfirstlane_b32 s0, v7
+; CHECK-NEXT: v_readfirstlane_b32 s2, v6
+; CHECK-NEXT: v_mov_b32_e32 v4, s10
+; CHECK-NEXT: v_mov_b32_e32 v5, s8
; CHECK-NEXT: s_mov_b32 s1, s0
; CHECK-NEXT: s_mov_b32 s3, s2
; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
@@ -280,15 +267,15 @@ define amdgpu_ps <2 x i64> @s_usub_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg
define amdgpu_ps i64 @s_uadd_i64(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_uadd_i64:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_add_u32 s2, s0, s2
-; CHECK-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-NEXT: s_addc_u32 s3, s1, s3
+; CHECK-NEXT: s_add_u32 s0, s0, s2
+; CHECK-NEXT: s_cselect_b64 s[4:5], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[4:5], 0
+; CHECK-NEXT: s_addc_u32 s1, s1, s3
; CHECK-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-NEXT: v_mov_b32_e32 v5, s3
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; CHECK-NEXT: v_mov_b32_e32 v4, s2
-; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: s_mov_b32 s1, s0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -305,10 +292,11 @@ define amdgpu_ps i64 @s_uadd_p1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_uadd_p1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_add_u32 s0, s0, 1
+; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[2:3], 0
; CHECK-NEXT: s_addc_u32 s1, s1, 0
-; CHECK-NEXT: s_cmp_eq_u64 s[0:1], 0
-; CHECK-NEXT: v_mov_b32_e32 v3, s1
; CHECK-NEXT: v_mov_b32_e32 v2, s0
+; CHECK-NEXT: v_mov_b32_e32 v3, s1
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
@@ -350,15 +338,15 @@ define amdgpu_ps i64 @s_uadd_n1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
define amdgpu_ps i64 @s_usub_p1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_usub_p1:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_add_u32 s2, s0, -1
-; CHECK-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-NEXT: s_addc_u32 s3, s1, -1
+; CHECK-NEXT: s_sub_u32 s0, s0, 1
+; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[2:3], 0
+; CHECK-NEXT: s_subb_u32 s1, s1, 0
; CHECK-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-NEXT: v_mov_b32_e32 v5, s3
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
-; CHECK-NEXT: v_mov_b32_e32 v4, s2
-; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: s_mov_b32 s1, s0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -374,15 +362,15 @@ define amdgpu_ps i64 @s_usub_p1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
define amdgpu_ps i64 @s_usub_n1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_usub_n1:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_add_u32 s2, s0, 1
-; CHECK-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-NEXT: s_addc_u32 s3, s1, 0
+; CHECK-NEXT: s_sub_u32 s0, s0, -1
+; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[2:3], 0
+; CHECK-NEXT: s_subb_u32 s1, s1, -1
; CHECK-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-NEXT: v_mov_b32_e32 v5, s3
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
-; CHECK-NEXT: v_mov_b32_e32 v4, s2
-; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: s_mov_b32 s1, s0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-min-agpr-alloc.ll
index 2ad6e68..f730199 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-no-agpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-attributor-min-agpr-alloc.ll
@@ -70,7 +70,7 @@ define amdgpu_kernel void @kernel_uses_asm_virtreg_def() {
define amdgpu_kernel void @kernel_uses_asm_physreg_def_tuple() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_physreg_def_tuple(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR2:[0-9]+]] {
; CHECK-NEXT: [[DEF:%.*]] = call i64 asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -118,7 +118,7 @@ define amdgpu_kernel void @kernel_uses_asm_physreg() {
define amdgpu_kernel void @kernel_uses_asm_physreg_tuple() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_physreg_tuple(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR2]] {
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -154,7 +154,7 @@ define void @func_uses_asm_physreg_agpr() {
define void @func_uses_asm_physreg_agpr_tuple() {
; CHECK-LABEL: define void @func_uses_asm_physreg_agpr_tuple(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR2]] {
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -168,7 +168,7 @@ declare void @unknown()
define amdgpu_kernel void @kernel_calls_extern() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_extern(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR3:[0-9]+]] {
; CHECK-NEXT: call void @unknown()
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -180,8 +180,8 @@ define amdgpu_kernel void @kernel_calls_extern() {
define amdgpu_kernel void @kernel_calls_extern_marked_callsite() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_extern_marked_callsite(
-; CHECK-SAME: ) #[[ATTR1]] {
-; CHECK-NEXT: call void @unknown() #[[ATTR10:[0-9]+]]
+; CHECK-SAME: ) #[[ATTR3]] {
+; CHECK-NEXT: call void @unknown() #[[ATTR29:[0-9]+]]
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
;
@@ -192,7 +192,7 @@ define amdgpu_kernel void @kernel_calls_extern_marked_callsite() {
define amdgpu_kernel void @kernel_calls_indirect(ptr %indirect) {
; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_indirect(
-; CHECK-SAME: ptr [[INDIRECT:%.*]]) #[[ATTR1]] {
+; CHECK-SAME: ptr [[INDIRECT:%.*]]) #[[ATTR3]] {
; CHECK-NEXT: call void [[INDIRECT]]()
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -204,8 +204,8 @@ define amdgpu_kernel void @kernel_calls_indirect(ptr %indirect) {
define amdgpu_kernel void @kernel_calls_indirect_marked_callsite(ptr %indirect) {
; CHECK-LABEL: define amdgpu_kernel void @kernel_calls_indirect_marked_callsite(
-; CHECK-SAME: ptr [[INDIRECT:%.*]]) #[[ATTR1]] {
-; CHECK-NEXT: call void [[INDIRECT]]() #[[ATTR10]]
+; CHECK-SAME: ptr [[INDIRECT:%.*]]) #[[ATTR3]] {
+; CHECK-NEXT: call void [[INDIRECT]]() #[[ATTR29]]
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
;
@@ -316,7 +316,7 @@ define amdgpu_kernel void @kernel_calls_workitem_id_x(ptr addrspace(1) %out) {
define amdgpu_kernel void @indirect_calls_none_agpr(i1 %cond) {
; CHECK-LABEL: define amdgpu_kernel void @indirect_calls_none_agpr(
-; CHECK-SAME: i1 [[COND:%.*]]) #[[ATTR1]] {
+; CHECK-SAME: i1 [[COND:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[FPTR:%.*]] = select i1 [[COND]], ptr @empty, ptr @also_empty
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq ptr [[FPTR]], @also_empty
; CHECK-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP3:%.*]]
@@ -342,7 +342,7 @@ define amdgpu_kernel void @indirect_calls_none_agpr(i1 %cond) {
define amdgpu_kernel void @kernel_uses_asm_virtreg_def_struct_0() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_virtreg_def_struct_0(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR2]] {
; CHECK-NEXT: [[DEF:%.*]] = call { i32, i32 } asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -354,7 +354,7 @@ define amdgpu_kernel void @kernel_uses_asm_virtreg_def_struct_0() {
define amdgpu_kernel void @kernel_uses_asm_virtreg_use_struct_1() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_virtreg_use_struct_1(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR5:[0-9]+]] {
; CHECK-NEXT: [[DEF:%.*]] = call { i32, <2 x i32> } asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -378,7 +378,7 @@ define amdgpu_kernel void @kernel_uses_asm_virtreg_use_struct_2() {
define amdgpu_kernel void @kernel_uses_asm_virtreg_ptr_ty() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_virtreg_ptr_ty(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR2]] {
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -390,7 +390,7 @@ define amdgpu_kernel void @kernel_uses_asm_virtreg_ptr_ty() {
define amdgpu_kernel void @kernel_uses_asm_virtreg_def_ptr_ty() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_virtreg_def_ptr_ty(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR2]] {
; CHECK-NEXT: [[DEF:%.*]] = call ptr asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -402,7 +402,7 @@ define amdgpu_kernel void @kernel_uses_asm_virtreg_def_ptr_ty() {
define amdgpu_kernel void @kernel_uses_asm_virtreg_def_vector_ptr_ty() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_virtreg_def_vector_ptr_ty(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR5]] {
; CHECK-NEXT: [[DEF:%.*]] = call <2 x ptr> asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -414,7 +414,7 @@ define amdgpu_kernel void @kernel_uses_asm_virtreg_def_vector_ptr_ty() {
define amdgpu_kernel void @kernel_uses_asm_physreg_def_struct_0() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_physreg_def_struct_0(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR6:[0-9]+]] {
; CHECK-NEXT: [[DEF:%.*]] = call { i32, i32 } asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -426,7 +426,7 @@ define amdgpu_kernel void @kernel_uses_asm_physreg_def_struct_0() {
define amdgpu_kernel void @kernel_uses_asm_clobber() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_clobber(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR7:[0-9]+]] {
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -438,7 +438,7 @@ define amdgpu_kernel void @kernel_uses_asm_clobber() {
define amdgpu_kernel void @kernel_uses_asm_clobber_tuple() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_clobber_tuple(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR8:[0-9]+]] {
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -450,7 +450,7 @@ define amdgpu_kernel void @kernel_uses_asm_clobber_tuple() {
define amdgpu_kernel void @kernel_uses_asm_clobber_oob() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_clobber_oob(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR9:[0-9]+]] {
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -462,7 +462,7 @@ define amdgpu_kernel void @kernel_uses_asm_clobber_oob() {
define amdgpu_kernel void @kernel_uses_asm_clobber_max() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_clobber_max(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR9]] {
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -474,7 +474,7 @@ define amdgpu_kernel void @kernel_uses_asm_clobber_max() {
define amdgpu_kernel void @kernel_uses_asm_physreg_oob() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_physreg_oob(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR9]] {
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -486,7 +486,7 @@ define amdgpu_kernel void @kernel_uses_asm_physreg_oob() {
define amdgpu_kernel void @kernel_uses_asm_virtreg_def_max_ty() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_virtreg_def_max_ty(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR10:[0-9]+]] {
; CHECK-NEXT: [[DEF:%.*]] = call <32 x i32> asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -498,7 +498,7 @@ define amdgpu_kernel void @kernel_uses_asm_virtreg_def_max_ty() {
define amdgpu_kernel void @kernel_uses_asm_virtreg_use_max_ty() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_virtreg_use_max_ty(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR10]] {
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -510,7 +510,7 @@ define amdgpu_kernel void @kernel_uses_asm_virtreg_use_max_ty() {
define amdgpu_kernel void @kernel_uses_asm_virtreg_use_def_max_ty() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_asm_virtreg_use_def_max_ty(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR10]] {
; CHECK-NEXT: [[DEF:%.*]] = call <32 x i32> asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -522,7 +522,7 @@ define amdgpu_kernel void @kernel_uses_asm_virtreg_use_def_max_ty() {
define amdgpu_kernel void @vreg_use_exceeds_register_file() {
; CHECK-LABEL: define amdgpu_kernel void @vreg_use_exceeds_register_file(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR9]] {
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -534,7 +534,7 @@ define amdgpu_kernel void @vreg_use_exceeds_register_file() {
define amdgpu_kernel void @vreg_def_exceeds_register_file() {
; CHECK-LABEL: define amdgpu_kernel void @vreg_def_exceeds_register_file(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR9]] {
; CHECK-NEXT: [[DEF:%.*]] = call <257 x i32> asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -546,7 +546,7 @@ define amdgpu_kernel void @vreg_def_exceeds_register_file() {
define amdgpu_kernel void @multiple() {
; CHECK-LABEL: define amdgpu_kernel void @multiple(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR10]] {
; CHECK-NEXT: [[DEF:%.*]] = call { <16 x i32>, <8 x i32>, <8 x i32> } asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -558,7 +558,7 @@ define amdgpu_kernel void @multiple() {
define amdgpu_kernel void @earlyclobber_0() {
; CHECK-LABEL: define amdgpu_kernel void @earlyclobber_0(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR11:[0-9]+]] {
; CHECK-NEXT: [[DEF:%.*]] = call <8 x i32> asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -570,7 +570,7 @@ define amdgpu_kernel void @earlyclobber_0() {
define amdgpu_kernel void @earlyclobber_1() {
; CHECK-LABEL: define amdgpu_kernel void @earlyclobber_1(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR12:[0-9]+]] {
; CHECK-NEXT: [[DEF:%.*]] = call { <8 x i32>, <16 x i32> } asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -582,7 +582,7 @@ define amdgpu_kernel void @earlyclobber_1() {
define amdgpu_kernel void @physreg_a32__vreg_a256__vreg_a512() {
; CHECK-LABEL: define amdgpu_kernel void @physreg_a32__vreg_a256__vreg_a512(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR13:[0-9]+]] {
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -594,7 +594,7 @@ define amdgpu_kernel void @physreg_a32__vreg_a256__vreg_a512() {
define amdgpu_kernel void @physreg_def_a32__def_vreg_a256__def_vreg_a512() {
; CHECK-LABEL: define amdgpu_kernel void @physreg_def_a32__def_vreg_a256__def_vreg_a512(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR13]] {
; CHECK-NEXT: [[TMP1:%.*]] = call { i32, <8 x i32>, <16 x i32> } asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -606,7 +606,7 @@ define amdgpu_kernel void @physreg_def_a32__def_vreg_a256__def_vreg_a512() {
define amdgpu_kernel void @physreg_def_a32___def_vreg_a512_use_vreg_a256() {
; CHECK-LABEL: define amdgpu_kernel void @physreg_def_a32___def_vreg_a512_use_vreg_a256(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR14:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = call { i32, <16 x i32> } asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -618,7 +618,7 @@ define amdgpu_kernel void @physreg_def_a32___def_vreg_a512_use_vreg_a256() {
define amdgpu_kernel void @mixed_physreg_vreg_tuples_0() {
; CHECK-LABEL: define amdgpu_kernel void @mixed_physreg_vreg_tuples_0(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR11]] {
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -630,7 +630,7 @@ define amdgpu_kernel void @mixed_physreg_vreg_tuples_0() {
define amdgpu_kernel void @mixed_physreg_vreg_tuples_1() {
; CHECK-LABEL: define amdgpu_kernel void @mixed_physreg_vreg_tuples_1(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR15:[0-9]+]] {
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -642,7 +642,7 @@ define amdgpu_kernel void @mixed_physreg_vreg_tuples_1() {
define amdgpu_kernel void @physreg_raises_limit() {
; CHECK-LABEL: define amdgpu_kernel void @physreg_raises_limit(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR16:[0-9]+]] {
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -652,10 +652,9 @@ define amdgpu_kernel void @physreg_raises_limit() {
ret void
}
-; FIXME: This should require 9. We cannot allocate an a128 at a0.
define amdgpu_kernel void @physreg_tuple_alignment_raises_limit() {
; CHECK-LABEL: define amdgpu_kernel void @physreg_tuple_alignment_raises_limit(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR11]] {
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -667,7 +666,7 @@ define amdgpu_kernel void @physreg_tuple_alignment_raises_limit() {
define amdgpu_kernel void @align3_virtreg() {
; CHECK-LABEL: define amdgpu_kernel void @align3_virtreg(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR6]] {
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -679,7 +678,7 @@ define amdgpu_kernel void @align3_virtreg() {
define amdgpu_kernel void @align3_align4_virtreg() {
; CHECK-LABEL: define amdgpu_kernel void @align3_align4_virtreg(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR15]] {
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -691,7 +690,7 @@ define amdgpu_kernel void @align3_align4_virtreg() {
define amdgpu_kernel void @align2_align4_virtreg() {
; CHECK-LABEL: define amdgpu_kernel void @align2_align4_virtreg(
-; CHECK-SAME: ) #[[ATTR1]] {
+; CHECK-SAME: ) #[[ATTR15]] {
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
@@ -703,7 +702,7 @@ define amdgpu_kernel void @align2_align4_virtreg() {
define amdgpu_kernel void @kernel_uses_write_register_a55() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_write_register_a55(
-; CHECK-SAME: ) #[[ATTR3:[0-9]+]] {
+; CHECK-SAME: ) #[[ATTR17:[0-9]+]] {
; CHECK-NEXT: call void @llvm.write_register.i32(metadata [[META0:![0-9]+]], i32 0)
; CHECK-NEXT: ret void
;
@@ -713,71 +712,313 @@ define amdgpu_kernel void @kernel_uses_write_register_a55() {
define amdgpu_kernel void @kernel_uses_write_register_v55() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_write_register_v55(
-; CHECK-SAME: ) #[[ATTR4:[0-9]+]] {
+; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT: call void @llvm.write_register.i32(metadata [[META1:![0-9]+]], i32 0)
+; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
;
call void @llvm.write_register.i64(metadata !1, i32 0)
+ call void @use_most()
ret void
}
define amdgpu_kernel void @kernel_uses_write_register_a55_57() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_write_register_a55_57(
-; CHECK-SAME: ) #[[ATTR3]] {
+; CHECK-SAME: ) #[[ATTR18:[0-9]+]] {
; CHECK-NEXT: call void @llvm.write_register.i96(metadata [[META2:![0-9]+]], i96 0)
+; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
;
call void @llvm.write_register.i64(metadata !2, i96 0)
+ call void @use_most()
ret void
}
define amdgpu_kernel void @kernel_uses_read_register_a55(ptr addrspace(1) %ptr) {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_read_register_a55(
-; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR3]] {
+; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR19:[0-9]+]] {
; CHECK-NEXT: [[REG:%.*]] = call i32 @llvm.read_register.i32(metadata [[META0]])
; CHECK-NEXT: store i32 [[REG]], ptr addrspace(1) [[PTR]], align 4
+; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
;
%reg = call i32 @llvm.read_register.i64(metadata !0)
store i32 %reg, ptr addrspace(1) %ptr
+ call void @use_most()
ret void
}
define amdgpu_kernel void @kernel_uses_read_volatile_register_a55(ptr addrspace(1) %ptr) {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_read_volatile_register_a55(
-; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR3]] {
+; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR19]] {
; CHECK-NEXT: [[REG:%.*]] = call i32 @llvm.read_volatile_register.i32(metadata [[META0]])
; CHECK-NEXT: store i32 [[REG]], ptr addrspace(1) [[PTR]], align 4
+; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
;
%reg = call i32 @llvm.read_volatile_register.i64(metadata !0)
store i32 %reg, ptr addrspace(1) %ptr
+ call void @use_most()
ret void
}
define amdgpu_kernel void @kernel_uses_read_register_a56_59(ptr addrspace(1) %ptr) {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_read_register_a56_59(
-; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR3]] {
+; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) #[[ATTR20:[0-9]+]] {
; CHECK-NEXT: [[REG:%.*]] = call i128 @llvm.read_register.i128(metadata [[META3:![0-9]+]])
; CHECK-NEXT: store i128 [[REG]], ptr addrspace(1) [[PTR]], align 8
+; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
;
%reg = call i128 @llvm.read_register.i64(metadata !3)
store i128 %reg, ptr addrspace(1) %ptr
+ call void @use_most()
ret void
}
define amdgpu_kernel void @kernel_uses_write_register_out_of_bounds_a256() {
; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_write_register_out_of_bounds_a256(
-; CHECK-SAME: ) #[[ATTR3]] {
+; CHECK-SAME: ) #[[ATTR9]] {
; CHECK-NEXT: call void @llvm.write_register.i32(metadata [[META4:![0-9]+]], i32 0)
+; CHECK-NEXT: call void @use_most()
; CHECK-NEXT: ret void
;
call void @llvm.write_register.i64(metadata !4, i32 0)
+ call void @use_most()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_multiple_uses() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_multiple_uses(
+; CHECK-SAME: ) #[[ATTR5]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void @use_most()
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "a"(i64 poison)
+ call void asm sideeffect "; use $0", "a"(i32 poison)
+ call void asm sideeffect "; use $0", "a"(i128 poison)
+ call void @use_most()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_multiple_defs() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_multiple_defs(
+; CHECK-SAME: ) #[[ATTR5]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i64 asm sideeffect "
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 asm sideeffect "
+; CHECK-NEXT: [[TMP3:%.*]] = call i128 asm sideeffect "
+; CHECK-NEXT: call void @use_most()
+; CHECK-NEXT: ret void
+;
+ call i64 asm sideeffect "; def $0", "=a"()
+ call i32 asm sideeffect "; def $0", "=a"()
+ call i128 asm sideeffect "; def $0", "=a"()
+ call void @use_most()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_multiple_use_defs() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_multiple_use_defs(
+; CHECK-SAME: ) #[[ATTR5]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: [[TMP1:%.*]] = call i128 asm sideeffect "
+; CHECK-NEXT: call void @use_most()
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "a"(i32 poison)
+ call i128 asm sideeffect "; def $0", "=a"()
+ call void @use_most()
+ ret void
+}
+
+define void @callgraph_b() {
+; CHECK-LABEL: define void @callgraph_b(
+; CHECK-SAME: ) #[[ATTR15]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void @use_most()
+; CHECK-NEXT: ret void
+;
+ call <4 x i32> asm sideeffect "; def $0", "=a"()
+ call void asm sideeffect "; use $0", "a"(<8 x i32> poison)
+ call void @use_most()
+ ret void
+}
+
+define void @callgraph_c() {
+; CHECK-LABEL: define void @callgraph_c(
+; CHECK-SAME: ) #[[ATTR2]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 asm sideeffect "
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void @use_most()
+; CHECK-NEXT: ret void
+;
+ call i32 asm sideeffect "; def $0", "=a"()
+ call void asm sideeffect "; use $0", "a"(<2 x i32> poison)
+ call void @use_most()
+ ret void
+}
+
+define void @callgraph_a(i1 %cond) {
+; CHECK-LABEL: define void @callgraph_a(
+; CHECK-SAME: i1 [[COND:%.*]]) #[[ATTR15]] {
+; CHECK-NEXT: br i1 [[COND]], label [[A:%.*]], label [[B:%.*]]
+; CHECK: a:
+; CHECK-NEXT: call void @callgraph_b()
+; CHECK-NEXT: ret void
+; CHECK: b:
+; CHECK-NEXT: call void @callgraph_c()
+; CHECK-NEXT: ret void
+;
+ br i1 %cond, label %a, label %b
+
+a:
+ call void @callgraph_b()
+ ret void
+
+b:
+ call void @callgraph_c()
+ ret void
+}
+
+
+define void @kernel_max_callgraph(i1 %cond) {
+; CHECK-LABEL: define void @kernel_max_callgraph(
+; CHECK-SAME: i1 [[COND:%.*]]) #[[ATTR15]] {
+; CHECK-NEXT: call void @callgraph_a(i1 [[COND]])
+; CHECK-NEXT: ret void
+;
+ call void @callgraph_a(i1 %cond)
+ ret void
+}
+
+define amdgpu_kernel void @kernel_uses_all_virtregs() #1 {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_all_virtregs(
+; CHECK-SAME: ) #[[ATTR21:[0-9]+]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void @use_most()
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "a,a,a,a,a,a,a,a"(<32 x i32> poison, <32 x i32> poison, <32 x i32> poison, <32 x i32> poison, <32 x i32> poison, <32 x i32> poison, <32 x i32> poison, <32 x i32> poison)
+ call void @use_most()
+ ret void
+}
+
+define amdgpu_kernel void @kernel_uses_all_virtregs_plus_1() #1 {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_uses_all_virtregs_plus_1(
+; CHECK-SAME: ) #[[ATTR21]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void @use_most()
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "a,a,a,a,a,a,a,a,a"(<32 x i32> poison, <32 x i32> poison, <32 x i32> poison, <32 x i32> poison, <32 x i32> poison, <32 x i32> poison, <32 x i32> poison, <32 x i32> poison, i32 poison)
+ call void @use_most()
+ ret void
+}
+
+define void @recursive() {
+; CHECK-LABEL: define void @recursive(
+; CHECK-SAME: ) #[[ATTR22:[0-9]+]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void @use_most()
+; CHECK-NEXT: call void @recursive()
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "a"(<7 x i32> poison)
+ call void @use_most()
+ call void @recursive()
+ ret void
+}
+
+define void @indirect_0() {
+; CHECK-LABEL: define void @indirect_0(
+; CHECK-SAME: ) #[[ATTR22]] {
+; CHECK-NEXT: call void asm sideeffect "
+; CHECK-NEXT: call void @use_most()
+; CHECK-NEXT: ret void
+;
+ call void asm sideeffect "; use $0", "a"(<7 x i32> poison)
+ call void @use_most()
+ ret void
+}
+
+define void @indirect_1() {
+; CHECK-LABEL: define void @indirect_1(
+; CHECK-SAME: ) #[[ATTR23:[0-9]+]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call <3 x i32> asm sideeffect "
+; CHECK-NEXT: call void @use_most()
+; CHECK-NEXT: ret void
+;
+ call <3 x i32> asm sideeffect "; def $0", "=a"()
+ call void @use_most()
+ ret void
+}
+
+define amdgpu_kernel void @knowable_indirect_call(i1 %cond) {
+; CHECK-LABEL: define amdgpu_kernel void @knowable_indirect_call(
+; CHECK-SAME: i1 [[COND:%.*]]) #[[ATTR22]] {
+; CHECK-NEXT: [[FPTR:%.*]] = select i1 [[COND]], ptr @indirect_0, ptr @indirect_1
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq ptr [[FPTR]], @indirect_1
+; CHECK-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP3:%.*]]
+; CHECK: 2:
+; CHECK-NEXT: call void @indirect_1()
+; CHECK-NEXT: br label [[TMP6:%.*]]
+; CHECK: 3:
+; CHECK-NEXT: br i1 true, label [[TMP4:%.*]], label [[TMP5:%.*]]
+; CHECK: 4:
+; CHECK-NEXT: call void @indirect_0()
+; CHECK-NEXT: br label [[TMP6]]
+; CHECK: 5:
+; CHECK-NEXT: unreachable
+; CHECK: 6:
+; CHECK-NEXT: call void @use_most()
+; CHECK-NEXT: ret void
+;
+ %fptr = select i1 %cond, ptr @indirect_0, ptr @indirect_1
+ call void %fptr()
+ call void @use_most()
+ ret void
+}
+
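+; Calls through poison, null, or a wholly unknown pointer give the attributor
+; no callee information, so no "amdgpu-agpr-alloc" bound is inferred for the
+; next three kernels.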
+define amdgpu_kernel void @calls_poison(i1 %cond) {
+; CHECK-LABEL: define amdgpu_kernel void @calls_poison(
+; CHECK-SAME: i1 [[COND:%.*]]) #[[ATTR3]] {
+; CHECK-NEXT: call void poison()
+; CHECK-NEXT: call void @use_most()
+; CHECK-NEXT: ret void
+;
+ call void poison()
+ call void @use_most()
+ ret void
+}
+
+define amdgpu_kernel void @calls_null(i1 %cond) {
+; CHECK-LABEL: define amdgpu_kernel void @calls_null(
+; CHECK-SAME: i1 [[COND:%.*]]) #[[ATTR3]] {
+; CHECK-NEXT: call void null()
+; CHECK-NEXT: call void @use_most()
+; CHECK-NEXT: ret void
+;
+ call void null()
+ call void @use_most()
+ ret void
+}
+
+define amdgpu_kernel void @indirect_unknown(ptr %fptr) {
+; CHECK-LABEL: define amdgpu_kernel void @indirect_unknown(
+; CHECK-SAME: ptr [[FPTR:%.*]]) #[[ATTR3]] {
+; CHECK-NEXT: call void [[FPTR]]()
+; CHECK-NEXT: ret void
+;
+ call void %fptr()
ret void
}
attributes #0 = { "amdgpu-agpr-alloc"="0" }
+attributes #1 = { "amdgpu-waves-per-eu"="1,1" }
!0 = !{!"a55"}
!1 = !{!"v55"}
@@ -787,16 +1028,35 @@ attributes #0 = { "amdgpu-agpr-alloc"="0" }
;.
; CHECK: attributes #[[ATTR0]] = { "amdgpu-agpr-alloc"="0" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR2:[0-9]+]] = { convergent nocallback nofree nosync nounwind willreturn memory(none) "target-cpu"="gfx90a" }
-; CHECK: attributes #[[ATTR3]] = { "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR4]] = { "amdgpu-agpr-alloc"="0" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR5:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) "target-cpu"="gfx90a" }
-; CHECK: attributes #[[ATTR6:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: readwrite) "target-cpu"="gfx90a" }
-; CHECK: attributes #[[ATTR7:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(read) "target-cpu"="gfx90a" }
-; CHECK: attributes #[[ATTR8:[0-9]+]] = { nounwind "target-cpu"="gfx90a" }
-; CHECK: attributes #[[ATTR9:[0-9]+]] = { nocallback nounwind "target-cpu"="gfx90a" }
-; CHECK: attributes #[[ATTR10]] = { "amdgpu-agpr-alloc"="0" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-agpr-alloc"="1" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR2]] = { "amdgpu-agpr-alloc"="2" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR3]] = { "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR4:[0-9]+]] = { convergent nocallback nofree nosync nounwind willreturn memory(none) "target-cpu"="gfx90a" }
+; CHECK: attributes #[[ATTR5]] = { "amdgpu-agpr-alloc"="4" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR6]] = { "amdgpu-agpr-alloc"="6" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR7]] = { "amdgpu-agpr-alloc"="5" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR8]] = { "amdgpu-agpr-alloc"="14" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR9]] = { "amdgpu-agpr-alloc"="256" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR10]] = { "amdgpu-agpr-alloc"="32" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR11]] = { "amdgpu-agpr-alloc"="9" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR12]] = { "amdgpu-agpr-alloc"="64" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR13]] = { "amdgpu-agpr-alloc"="49" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR14]] = { "amdgpu-agpr-alloc"="33" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR15]] = { "amdgpu-agpr-alloc"="8" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR16]] = { "amdgpu-agpr-alloc"="13" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR17]] = { "amdgpu-agpr-alloc"="56" "amdgpu-no-cluster-id-x" "amdgpu-no-cluster-id-y" "amdgpu-no-cluster-id-z" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-flat-scratch-init" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR18]] = { "amdgpu-agpr-alloc"="58" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR19]] = { "amdgpu-agpr-alloc"="56" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR20]] = { "amdgpu-agpr-alloc"="60" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR21]] = { "amdgpu-agpr-alloc"="256" "amdgpu-waves-per-eu"="1,1" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR22]] = { "amdgpu-agpr-alloc"="7" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR23]] = { "amdgpu-agpr-alloc"="3" "target-cpu"="gfx90a" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR24:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) "target-cpu"="gfx90a" }
+; CHECK: attributes #[[ATTR25:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: readwrite) "target-cpu"="gfx90a" }
+; CHECK: attributes #[[ATTR26:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(read) "target-cpu"="gfx90a" }
+; CHECK: attributes #[[ATTR27:[0-9]+]] = { nounwind "target-cpu"="gfx90a" }
+; CHECK: attributes #[[ATTR28:[0-9]+]] = { nocallback nounwind "target-cpu"="gfx90a" }
+; CHECK: attributes #[[ATTR29]] = { "amdgpu-agpr-alloc"="0" }
;.
; CHECK: [[META0]] = !{!"a55"}
; CHECK: [[META1]] = !{!"v55"}
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
new file mode 100644
index 0000000..6c4f504
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
@@ -0,0 +1,452 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -amdgpu-enable-uniform-intrinsic-combine=0 -O3 -S < %s | FileCheck %s -check-prefix=CURRENT-CHECK
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -O3 -S < %s | FileCheck %s -check-prefix=O3-CHECK
+
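+; A "trivial waterfall" ballots a loop-carried i1 and exits once no lane is
+; still active. When the ballot operand is uniform, the combine rewrites the
+; ballot in terms of the condition itself, and -O3 can then collapse the loop
+; to a single store (see the O3-CHECK lines).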
+define protected amdgpu_kernel void @trivial_waterfall_eq_zero(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CURRENT-CHECK-NEXT: [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT: [[IS_DONE_PEEL:%.*]] = icmp eq i32 [[TMP0]], 0
+; CURRENT-CHECK-NEXT: br i1 [[IS_DONE_PEEL]], label %[[EXIT:.*]], label %[[IF_PEEL:.*]]
+; CURRENT-CHECK: [[IF_PEEL]]:
+; CURRENT-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: br label %[[EXIT]]
+; CURRENT-CHECK: [[EXIT]]:
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
+; PASS-CHECK-NEXT: [[ENTRY:.*]]:
+; PASS-CHECK-NEXT: br label %[[WHILE:.*]]
+; PASS-CHECK: [[WHILE]]:
+; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT: [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
+; PASS-CHECK-NEXT: br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK: [[IF]]:
+; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: br label %[[WHILE]]
+; PASS-CHECK: [[EXIT]]:
+; PASS-CHECK-NEXT: ret void
+;
+; O3-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero(
+; O3-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; O3-CHECK-NEXT: [[ENTRY:.*:]]
+; O3-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; O3-CHECK-NEXT: ret void
+;
+entry:
+ br label %while
+
+while:
+ %done = phi i1 [ 0, %entry ], [ 1, %if ]
+ %not_done = xor i1 %done, true
+ %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %not_done)
+ %is_done = icmp eq i64 %ballot, 0 ; in this case is_done = !not_done
+ br i1 %is_done, label %exit, label %if
+
+if:
+ store i32 5, ptr addrspace(1) %out
+ br label %while
+
+exit:
+ ret void
+}
+
+define protected amdgpu_kernel void @trivial_waterfall_eq_zero_swap_op(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero_swap_op(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT: [[IS_DONE_PEEL:%.*]] = icmp eq i32 [[TMP0]], 0
+; CURRENT-CHECK-NEXT: br i1 [[IS_DONE_PEEL]], label %[[EXIT:.*]], label %[[IF_PEEL:.*]]
+; CURRENT-CHECK: [[IF_PEEL]]:
+; CURRENT-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: br label %[[EXIT]]
+; CURRENT-CHECK: [[EXIT]]:
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero_swap_op(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[ENTRY:.*]]:
+; PASS-CHECK-NEXT: br label %[[WHILE:.*]]
+; PASS-CHECK: [[WHILE]]:
+; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT: [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
+; PASS-CHECK-NEXT: br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK: [[IF]]:
+; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: br label %[[WHILE]]
+; PASS-CHECK: [[EXIT]]:
+; PASS-CHECK-NEXT: ret void
+;
+; O3-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero_swap_op(
+; O3-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; O3-CHECK-NEXT: [[ENTRY:.*:]]
+; O3-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; O3-CHECK-NEXT: ret void
+;
+entry:
+ br label %while
+
+while:
+ %done = phi i1 [ 0, %entry ], [ 1, %if ]
+ %not_done = xor i1 %done, true
+ %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %not_done)
+ %is_done = icmp eq i64 0, %ballot ; in this case is_done = !not_done
+ br i1 %is_done, label %exit, label %if
+
+if:
+ store i32 5, ptr addrspace(1) %out
+ br label %while
+
+exit:
+ ret void
+}
+
+define protected amdgpu_kernel void @trivial_waterfall_ne_zero(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] {
+; CURRENT-CHECK-NEXT: [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: br label %[[WHILE:.*]]
+; CURRENT-CHECK: [[WHILE]]:
+; CURRENT-CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT: [[IS_DONE_NOT:%.*]] = icmp eq i32 [[TMP0]], 0
+; CURRENT-CHECK-NEXT: br i1 [[IS_DONE_NOT]], label %[[WHILE]], label %[[EXIT:.*]], !llvm.loop [[LOOP0:![0-9]+]]
+; CURRENT-CHECK: [[EXIT]]:
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[ENTRY:.*]]:
+; PASS-CHECK-NEXT: br label %[[WHILE:.*]]
+; PASS-CHECK: [[WHILE]]:
+; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK: [[IF]]:
+; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: br label %[[WHILE]]
+; PASS-CHECK: [[EXIT]]:
+; PASS-CHECK-NEXT: ret void
+;
+; O3-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero(
+; O3-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; O3-CHECK-NEXT: [[ENTRY:.*:]]
+; O3-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; O3-CHECK-NEXT: ret void
+;
+entry:
+ br label %while
+
+while:
+ %done = phi i1 [ 0, %entry ], [ 1, %if ]
+ %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %done)
+ %is_done = icmp ne i64 0, %ballot ; in this case is_done = done
+ br i1 %is_done, label %exit, label %if
+
+if:
+ store i32 5, ptr addrspace(1) %out
+ br label %while
+
+exit:
+ ret void
+}
+
+define protected amdgpu_kernel void @trivial_waterfall_ne_zero_swap(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero_swap(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT: [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: br label %[[WHILE:.*]]
+; CURRENT-CHECK: [[WHILE]]:
+; CURRENT-CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT: [[IS_DONE_NOT:%.*]] = icmp eq i32 [[TMP0]], 0
+; CURRENT-CHECK-NEXT: br i1 [[IS_DONE_NOT]], label %[[WHILE]], label %[[EXIT:.*]], !llvm.loop [[LOOP2:![0-9]+]]
+; CURRENT-CHECK: [[EXIT]]:
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero_swap(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[ENTRY:.*]]:
+; PASS-CHECK-NEXT: br label %[[WHILE:.*]]
+; PASS-CHECK: [[WHILE]]:
+; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK: [[IF]]:
+; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: br label %[[WHILE]]
+; PASS-CHECK: [[EXIT]]:
+; PASS-CHECK-NEXT: ret void
+;
+; O3-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero_swap(
+; O3-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; O3-CHECK-NEXT: [[ENTRY:.*:]]
+; O3-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; O3-CHECK-NEXT: ret void
+;
+entry:
+ br label %while
+
+while:
+ %done = phi i1 [ 0, %entry ], [ 1, %if ]
+ %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %done)
+ %is_done = icmp ne i64 %ballot, 0 ; in this case is_done = done
+ br i1 %is_done, label %exit, label %if
+
+if:
+ store i32 5, ptr addrspace(1) %out
+ br label %while
+
+exit:
+ ret void
+}
+
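+; readfirstlane of the constant 0 folds to 0, so the "am I the first active
+; lane" check becomes uniformly true and the waterfall reduces to one store.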
+define protected amdgpu_kernel void @trivial_uniform_waterfall(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_uniform_waterfall(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT: [[IS_DONE_PEEL:%.*]] = icmp eq i32 [[TMP0]], 0
+; CURRENT-CHECK-NEXT: br i1 [[IS_DONE_PEEL]], label %[[EXIT:.*]], label %[[WORK_PEEL:.*]]
+; CURRENT-CHECK: [[WORK_PEEL]]:
+; CURRENT-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: br label %[[EXIT]]
+; CURRENT-CHECK: [[EXIT]]:
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_uniform_waterfall(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[ENTRY:.*]]:
+; PASS-CHECK-NEXT: br label %[[WHILE:.*]]
+; PASS-CHECK: [[WHILE]]:
+; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[NEW_DONE:%.*]], %[[TAIL:.*]] ]
+; PASS-CHECK-NEXT: [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
+; PASS-CHECK-NEXT: br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF:.*]]
+; PASS-CHECK: [[IF]]:
+; PASS-CHECK-NEXT: [[IS_FIRST_ACTIVE_ID:%.*]] = icmp eq i32 0, 0
+; PASS-CHECK-NEXT: br i1 [[IS_FIRST_ACTIVE_ID]], label %[[WORK:.*]], label %[[TAIL]]
+; PASS-CHECK: [[WORK]]:
+; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: br label %[[TAIL]]
+; PASS-CHECK: [[TAIL]]:
+; PASS-CHECK-NEXT: [[NEW_DONE]] = phi i1 [ true, %[[WORK]] ], [ false, %[[IF]] ]
+; PASS-CHECK-NEXT: br label %[[WHILE]]
+; PASS-CHECK: [[EXIT]]:
+; PASS-CHECK-NEXT: ret void
+;
+; O3-CHECK-LABEL: define protected amdgpu_kernel void @trivial_uniform_waterfall(
+; O3-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; O3-CHECK-NEXT: [[ENTRY:.*:]]
+; O3-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; O3-CHECK-NEXT: ret void
+;
+entry:
+ br label %while
+
+while:
+ %done = phi i1 [ false, %entry ], [ %new_done, %tail ]
+ %not_done = xor i1 %done, true
+ %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %not_done)
+ %is_done = icmp eq i64 %ballot, 0
+ br i1 %is_done, label %exit, label %if
+
+if:
+ %first_active_id = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 0)
+ %is_first_active_id = icmp eq i32 0, %first_active_id
+ br i1 %is_first_active_id, label %work, label %tail
+
+work:
+ store i32 5, ptr addrspace(1) %out
+ br label %tail
+
+tail:
+ %new_done = phi i1 [ true, %work ], [ false, %if ]
+ br label %while
+
+exit:
+ ret void
+}
+
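+; With a uniform %mymask, readfirstlane(%mymask) returns %mymask on every
+; lane, so the first-active-lane check folds to "icmp eq %mymask, %mymask".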
+define protected amdgpu_kernel void @uniform_waterfall(ptr addrspace(1) %out, i32 %mymask) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @uniform_waterfall(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]], i32 [[MYMASK:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT: [[IS_DONE_PEEL:%.*]] = icmp eq i32 [[TMP0]], 0
+; CURRENT-CHECK-NEXT: br i1 [[IS_DONE_PEEL]], label %[[EXIT:.*]], label %[[WORK_PEEL:.*]]
+; CURRENT-CHECK: [[WORK_PEEL]]:
+; CURRENT-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: br label %[[EXIT]]
+; CURRENT-CHECK: [[EXIT]]:
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @uniform_waterfall(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[MYMASK:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[ENTRY:.*]]:
+; PASS-CHECK-NEXT: br label %[[WHILE:.*]]
+; PASS-CHECK: [[WHILE]]:
+; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[NEW_DONE:%.*]], %[[TAIL:.*]] ]
+; PASS-CHECK-NEXT: [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
+; PASS-CHECK-NEXT: br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF:.*]]
+; PASS-CHECK: [[IF]]:
+; PASS-CHECK-NEXT: [[IS_FIRST_ACTIVE_ID:%.*]] = icmp eq i32 [[MYMASK]], [[MYMASK]]
+; PASS-CHECK-NEXT: br i1 [[IS_FIRST_ACTIVE_ID]], label %[[WORK:.*]], label %[[TAIL]]
+; PASS-CHECK: [[WORK]]:
+; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: br label %[[TAIL]]
+; PASS-CHECK: [[TAIL]]:
+; PASS-CHECK-NEXT: [[NEW_DONE]] = phi i1 [ true, %[[WORK]] ], [ false, %[[IF]] ]
+; PASS-CHECK-NEXT: br label %[[WHILE]]
+; PASS-CHECK: [[EXIT]]:
+; PASS-CHECK-NEXT: ret void
+;
+; O3-CHECK-LABEL: define protected amdgpu_kernel void @uniform_waterfall(
+; O3-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]], i32 [[MYMASK:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; O3-CHECK-NEXT: [[ENTRY:.*:]]
+; O3-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; O3-CHECK-NEXT: ret void
+;
+entry:
+ br label %while
+
+while:
+ %done = phi i1 [ false, %entry ], [ %new_done, %tail ]
+ %not_done = xor i1 %done, true
+ %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %not_done)
+ %is_done = icmp eq i64 %ballot, 0
+ br i1 %is_done, label %exit, label %if
+
+if:
+ %first_active_id = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 %mymask)
+ %is_first_active_id = icmp eq i32 %mymask, %first_active_id
+ br i1 %is_first_active_id, label %work, label %tail
+
+work:
+ store i32 5, ptr addrspace(1) %out
+ br label %tail
+
+tail:
+ %new_done = phi i1 [ true, %work ], [ false, %if ]
+ br label %while
+
+exit:
+ ret void
+}
+
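+; The same idiom with the wave32 form of the intrinsic,
+; llvm.amdgcn.ballot.i32.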
+define protected amdgpu_kernel void @trivial_waterfall_eq_zero_i32(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero_i32(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT: [[BALLOT_PEEL:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT: [[IS_DONE_PEEL:%.*]] = icmp eq i32 [[BALLOT_PEEL]], 0
+; CURRENT-CHECK-NEXT: br i1 [[IS_DONE_PEEL]], label %[[EXIT:.*]], label %[[IF_PEEL:.*]]
+; CURRENT-CHECK: [[IF_PEEL]]:
+; CURRENT-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: br label %[[EXIT]]
+; CURRENT-CHECK: [[EXIT]]:
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero_i32(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[ENTRY:.*]]:
+; PASS-CHECK-NEXT: br label %[[WHILE:.*]]
+; PASS-CHECK: [[WHILE]]:
+; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT: [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
+; PASS-CHECK-NEXT: br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK: [[IF]]:
+; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: br label %[[WHILE]]
+; PASS-CHECK: [[EXIT]]:
+; PASS-CHECK-NEXT: ret void
+;
+; O3-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero_i32(
+; O3-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; O3-CHECK-NEXT: [[ENTRY:.*:]]
+; O3-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; O3-CHECK-NEXT: ret void
+;
+entry:
+ br label %while
+
+while:
+ %done = phi i1 [ 0, %entry ], [ 1, %if ]
+ %not_done = xor i1 %done, true
+ %ballot = tail call i32 @llvm.amdgcn.ballot.i32(i1 %not_done)
+ %is_done = icmp eq i32 %ballot, 0 ; in this case is_done = !not_done
+ br i1 %is_done, label %exit, label %if
+
+if:
+ store i32 5, ptr addrspace(1) %out
+ br label %while
+
+exit:
+ ret void
+}
+
+define protected amdgpu_kernel void @trivial_waterfall_ne_zero_i32(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero_i32(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT: [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: br label %[[WHILE:.*]]
+; CURRENT-CHECK: [[WHILE]]:
+; CURRENT-CHECK-NEXT: [[BALLOT:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT: [[IS_DONE_NOT:%.*]] = icmp eq i32 [[BALLOT]], 0
+; CURRENT-CHECK-NEXT: br i1 [[IS_DONE_NOT]], label %[[WHILE]], label %[[EXIT:.*]], !llvm.loop [[LOOP3:![0-9]+]]
+; CURRENT-CHECK: [[EXIT]]:
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero_i32(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[ENTRY:.*]]:
+; PASS-CHECK-NEXT: br label %[[WHILE:.*]]
+; PASS-CHECK: [[WHILE]]:
+; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK: [[IF]]:
+; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: br label %[[WHILE]]
+; PASS-CHECK: [[EXIT]]:
+; PASS-CHECK-NEXT: ret void
+;
+; O3-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero_i32(
+; O3-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; O3-CHECK-NEXT: [[ENTRY:.*:]]
+; O3-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; O3-CHECK-NEXT: ret void
+;
+entry:
+ br label %while
+
+while:
+ %done = phi i1 [ 0, %entry ], [ 1, %if ]
+ %ballot = tail call i32 @llvm.amdgcn.ballot.i32(i1 %done)
+ %is_done = icmp ne i32 0, %ballot ; in this case is_done = done
+ br i1 %is_done, label %exit, label %if
+
+if:
+ store i32 5, ptr addrspace(1) %out
+ br label %while
+
+exit:
+ ret void
+}
+
+declare i64 @llvm.amdgcn.ballot.i64(i1)
+!6 = !{i64 690}
+!7 = distinct !{!7, !8}
+!8 = !{!"llvm.loop.mustprogress"}
+;.
+; CURRENT-CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]}
+; CURRENT-CHECK: [[META1]] = !{!"llvm.loop.peeled.count", i32 1}
+; CURRENT-CHECK: [[LOOP2]] = distinct !{[[LOOP2]], [[META1]]}
+; CURRENT-CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
new file mode 100644
index 0000000..aa11574
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
@@ -0,0 +1,790 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -amdgpu-enable-uniform-intrinsic-combine=0 -O3 -S < %s | FileCheck %s -check-prefix=CURRENT-CHECK
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine,dce -S < %s | FileCheck %s -check-prefix=DCE-CHECK
+
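+; The combine rewrites lane-crossing intrinsics (permlane64, readlane,
+; readfirstlane, ballot) to their operand when that operand is provably
+; uniform, and must leave them in place when it is divergent.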
+define amdgpu_kernel void @permlane64_constant(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @permlane64_constant(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CURRENT-CHECK-NEXT: store i32 77, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_constant(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
+; PASS-CHECK-NEXT: store i32 77, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @permlane64_constant(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
+; DCE-CHECK-NEXT: store i32 77, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.permlane64(i32 77)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @permlane64_uniform(ptr addrspace(1) %out, i32 %src) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @permlane64_uniform(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]], i32 [[SRC:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: store i32 [[SRC]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_uniform(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: store i32 [[SRC]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @permlane64_uniform(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: store i32 [[SRC]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.permlane64(i32 %src)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @permlane64_nonuniform(i32 addrspace(1)* %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @permlane64_nonuniform(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] {
+; CURRENT-CHECK-NEXT: [[TID:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID]])
+; CURRENT-CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[TID]] to i64
+; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_nonuniform(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID]])
+; PASS-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID]]
+; PASS-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @permlane64_nonuniform(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID]])
+; DCE-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID]]
+; DCE-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %v = call i32 @llvm.amdgcn.permlane64(i32 %tid)
+ %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+ store i32 %v, i32 addrspace(1)* %out_ptr
+ ret void
+}
+
+define amdgpu_kernel void @permlane64_nonuniform_expression(i32 addrspace(1)* %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @permlane64_nonuniform_expression(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT: [[TID:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[TID2:%.*]] = add nuw nsw i32 [[TID]], 1
+; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID2]])
+; CURRENT-CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[TID]] to i64
+; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_nonuniform_expression(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[TID2:%.*]] = add i32 [[TID]], 1
+; PASS-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID2]])
+; PASS-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID]]
+; PASS-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @permlane64_nonuniform_expression(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[TID2:%.*]] = add i32 [[TID]], 1
+; DCE-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID2]])
+; DCE-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID]]
+; DCE-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid2 = add i32 %tid, 1
+ %v = call i32 @llvm.amdgcn.permlane64(i32 %tid2)
+ %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+ store i32 %v, i32 addrspace(1)* %out_ptr
+ ret void
+}
+
+define amdgpu_kernel void @readlane_constant(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_constant(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: store i32 7, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_constant(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: store i32 7, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_constant(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: store i32 7, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.readlane(i32 7, i32 5)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readlane_nonuniform_indices(ptr addrspace(1) %out, i32 %src0, i32 %src1) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_indices(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]], i32 [[SRC0:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_indices(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_indices(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.readlane(i32 %src0, i32 %src1)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readlane_nonuniform_workitem(i32 addrspace(1)* %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_workitem(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR2:[0-9]+]] {
+; CURRENT-CHECK-NEXT: [[TIDX:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[TIDY:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.y()
+; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; CURRENT-CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[TIDX]] to i64
+; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_workitem(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; PASS-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; PASS-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TIDX]]
+; PASS-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_workitem(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; DCE-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; DCE-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TIDX]]
+; DCE-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+ %tidy = call i32 @llvm.amdgcn.workitem.id.y()
+ %v = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy)
+ %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tidx
+ store i32 %v, i32 addrspace(1)* %out_ptr
+ ret void
+}
+
+define amdgpu_kernel void @readlane_nonuniform_expression(i32 addrspace(1)* %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_expression(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
+; CURRENT-CHECK-NEXT: [[TIDX:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[TIDY:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.y()
+; CURRENT-CHECK-NEXT: [[TIDX2:%.*]] = add nuw nsw i32 [[TIDX]], 1
+; CURRENT-CHECK-NEXT: [[TIDY2:%.*]] = add nuw nsw i32 [[TIDY]], 2
+; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX2]], i32 [[TIDY2]])
+; CURRENT-CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[TIDX]] to i64
+; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_expression(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; PASS-CHECK-NEXT: [[TIDX2:%.*]] = add i32 [[TIDX]], 1
+; PASS-CHECK-NEXT: [[TIDY2:%.*]] = add i32 [[TIDY]], 2
+; PASS-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX2]], i32 [[TIDY2]])
+; PASS-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TIDX]]
+; PASS-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_expression(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; DCE-CHECK-NEXT: [[TIDX2:%.*]] = add i32 [[TIDX]], 1
+; DCE-CHECK-NEXT: [[TIDY2:%.*]] = add i32 [[TIDY]], 2
+; DCE-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX2]], i32 [[TIDY2]])
+; DCE-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TIDX]]
+; DCE-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+ %tidy = call i32 @llvm.amdgcn.workitem.id.y()
+ %tidx2 = add i32 %tidx, 1
+ %tidy2 = add i32 %tidy, 2
+ %v = call i32 @llvm.amdgcn.readlane(i32 %tidx2, i32 %tidy2)
+ %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tidx
+ store i32 %v, i32 addrspace(1)* %out_ptr
+ ret void
+}
+
+define amdgpu_kernel void @readfirstlane_constant(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_constant(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: store i32 7, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_constant(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: store i32 7, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_constant(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: store i32 7, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.readfirstlane(i32 7)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readfirstlane_with_argument(ptr addrspace(1) %out, i32 %src0) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_argument(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]], i32 [[SRC0:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_argument(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_argument(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.readfirstlane(i32 %src0)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readfirstlane_with_workitem_id(i32 addrspace(1)* %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_workitem_id(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT: [[TID:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID]])
+; CURRENT-CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[TID]] to i64
+; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_workitem_id(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID]])
+; PASS-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID]]
+; PASS-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_workitem_id(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID]])
+; DCE-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID]]
+; DCE-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %v = call i32 @llvm.amdgcn.readfirstlane(i32 %tid)
+ %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+ store i32 %v, i32 addrspace(1)* %out_ptr
+ ret void
+}
+
+define amdgpu_kernel void @readfirstlane_expression(i32 addrspace(1)* %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_expression(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT: [[TID:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[TID2:%.*]] = add nuw nsw i32 [[TID]], 1
+; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID2]])
+; CURRENT-CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[TID2]] to i64
+; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_expression(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[TID2:%.*]] = add i32 [[TID]], 1
+; PASS-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID2]])
+; PASS-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID2]]
+; PASS-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_expression(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[TID2:%.*]] = add i32 [[TID]], 1
+; DCE-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID2]])
+; DCE-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID2]]
+; DCE-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid2 = add i32 %tid, 1
+ %v = call i32 @llvm.amdgcn.readfirstlane(i32 %tid2)
+ %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid2
+ store i32 %v, i32 addrspace(1)* %out_ptr
+ ret void
+}
+
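+; The result of readfirstlane or readlane is uniform across the wave, so a
+; second read(first)lane applied to it folds away in the next four tests.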
+define amdgpu_kernel void @readfirstlane_with_readfirstlane(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readfirstlane(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readfirstlane(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readfirstlane(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 5)
+ %v2 = call i32 @llvm.amdgcn.readfirstlane(i32 %v1)
+ store i32 %v2, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readfirstlane_with_readlane(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readlane(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
+; CURRENT-CHECK-NEXT: [[TIDX:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[TIDY:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.y()
+; CURRENT-CHECK-NEXT: [[V1:%.*]] = tail call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; CURRENT-CHECK-NEXT: store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readlane(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; PASS-CHECK-NEXT: [[V1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; PASS-CHECK-NEXT: store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readlane(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; DCE-CHECK-NEXT: [[V1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; DCE-CHECK-NEXT: store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+ %tidy = call i32 @llvm.amdgcn.workitem.id.y()
+ %v1 = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy)
+ %v2 = call i32 @llvm.amdgcn.readfirstlane(i32 %v1)
+ store i32 %v2, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readlane_with_firstlane(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_with_firstlane(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT: [[TIDX:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[V1:%.*]] = tail call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TIDX]])
+; CURRENT-CHECK-NEXT: store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_with_firstlane(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[V1:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TIDX]])
+; PASS-CHECK-NEXT: store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_with_firstlane(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[V1:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TIDX]])
+; DCE-CHECK-NEXT: store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+ %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 %tidx)
+ %v2 = call i32 @llvm.amdgcn.readlane(i32 %v1, i32 3)
+ store i32 %v2, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readlane_readlane(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_readlane(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
+; CURRENT-CHECK-NEXT: [[TIDX:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[TIDY:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.y()
+; CURRENT-CHECK-NEXT: [[V1:%.*]] = tail call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; CURRENT-CHECK-NEXT: store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_readlane(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; PASS-CHECK-NEXT: [[V1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; PASS-CHECK-NEXT: store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_readlane(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; DCE-CHECK-NEXT: [[V1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; DCE-CHECK-NEXT: store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+ %tidy = call i32 @llvm.amdgcn.workitem.id.y()
+ %v1 = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy)
+ %v2 = call i32 @llvm.amdgcn.readlane(i32 %v1, i32 2)
+ store i32 %v2, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @permlane64_boundary(ptr addrspace(1) %out_min, ptr addrspace(1) %out_max) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @permlane64_boundary(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT_MIN:%.*]], ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT_MAX:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: store i32 -2147483648, ptr addrspace(1) [[OUT_MIN]], align 4
+; CURRENT-CHECK-NEXT: store i32 2147483647, ptr addrspace(1) [[OUT_MAX]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_boundary(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT_MIN:%.*]], ptr addrspace(1) [[OUT_MAX:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: store i32 -2147483648, ptr addrspace(1) [[OUT_MIN]], align 4
+; PASS-CHECK-NEXT: store i32 2147483647, ptr addrspace(1) [[OUT_MAX]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @permlane64_boundary(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT_MIN:%.*]], ptr addrspace(1) [[OUT_MAX:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: store i32 -2147483648, ptr addrspace(1) [[OUT_MIN]], align 4
+; DCE-CHECK-NEXT: store i32 2147483647, ptr addrspace(1) [[OUT_MAX]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %min_v = call i32 @llvm.amdgcn.permlane64(i32 -2147483648)
+ store i32 %min_v, ptr addrspace(1) %out_min
+ %max_v = call i32 @llvm.amdgcn.permlane64(i32 2147483647)
+ store i32 %max_v, ptr addrspace(1) %out_max
+ ret void
+}
+
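+; Negative test: both the value and the lane index are divergent, so the
+; readlane must be preserved by every pipeline.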
+define amdgpu_kernel void @readlane_cross_lane(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_cross_lane(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT: [[TIDX:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[TIDY:%.*]] = add nuw nsw i32 [[TIDX]], 5
+; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_cross_lane(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[TIDY:%.*]] = add i32 [[TIDX]], 5
+; PASS-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; PASS-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_cross_lane(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[TIDY:%.*]] = add i32 [[TIDX]], 5
+; DCE-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; DCE-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+ %tidy = add i32 %tidx, 5
+ %v = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readfirstlane_random(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_random(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: store i32 435, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_random(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[RANDOM:%.*]] = xor i32 123, 456
+; PASS-CHECK-NEXT: store i32 [[RANDOM]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_random(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[RANDOM:%.*]] = xor i32 123, 456
+; DCE-CHECK-NEXT: store i32 [[RANDOM]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %random = xor i32 123, 456
+ %v = call i32 @llvm.amdgcn.readfirstlane(i32 %random)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readlane_expression(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_expression(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT: [[IDX1:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[IDX2:%.*]] = shl nuw nsw i32 [[IDX1]], 1
+; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.readlane.i32(i32 [[IDX1]], i32 [[IDX2]])
+; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_expression(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[IDX1:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[IDX2:%.*]] = mul i32 [[IDX1]], 2
+; PASS-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[IDX1]], i32 [[IDX2]])
+; PASS-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_expression(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[IDX1:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[IDX2:%.*]] = mul i32 [[IDX1]], 2
+; DCE-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[IDX1]], i32 [[IDX2]])
+; DCE-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %idx1 = call i32 @llvm.amdgcn.workitem.id.x()
+ %idx2 = mul i32 %idx1, 2
+ %v = call i32 @llvm.amdgcn.readlane(i32 %idx1, i32 %idx2)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @ballot_i32(i32 %v, ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @ballot_i32(
+; CURRENT-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) writeonly captures(none) initializes((0, 1)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT: [[C:%.*]] = trunc i32 [[V]] to i1
+; CURRENT-CHECK-NEXT: [[BALLOT:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 [[C]])
+; CURRENT-CHECK-NEXT: [[BALLOT_NE_ZERO:%.*]] = icmp ne i32 [[BALLOT]], 0
+; CURRENT-CHECK-NEXT: store i1 [[BALLOT_NE_ZERO]], ptr addrspace(1) [[OUT]], align 1
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @ballot_i32(
+; PASS-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[C:%.*]] = trunc i32 [[V]] to i1
+; PASS-CHECK-NEXT: store i1 [[C]], ptr addrspace(1) [[OUT]], align 1
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @ballot_i32(
+; DCE-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[C:%.*]] = trunc i32 [[V]] to i1
+; DCE-CHECK-NEXT: store i1 [[C]], ptr addrspace(1) [[OUT]], align 1
+; DCE-CHECK-NEXT: ret void
+;
+ %c = trunc i32 %v to i1
+ %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c)
+ %ballot_ne_zero = icmp ne i32 %ballot, 0
+ store i1 %ballot_ne_zero, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @ballot_i64(i32 %v, ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @ballot_i64(
+; CURRENT-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) writeonly captures(none) initializes((0, 1)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT: [[C:%.*]] = trunc i32 [[V]] to i1
+; CURRENT-CHECK-NEXT: [[TMP1:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 [[C]])
+; CURRENT-CHECK-NEXT: [[BALLOT_NE_ZERO:%.*]] = icmp ne i32 [[TMP1]], 0
+; CURRENT-CHECK-NEXT: store i1 [[BALLOT_NE_ZERO]], ptr addrspace(1) [[OUT]], align 1
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @ballot_i64(
+; PASS-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[C:%.*]] = trunc i32 [[V]] to i1
+; PASS-CHECK-NEXT: store i1 [[C]], ptr addrspace(1) [[OUT]], align 1
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @ballot_i64(
+; DCE-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[C:%.*]] = trunc i32 [[V]] to i1
+; DCE-CHECK-NEXT: store i1 [[C]], ptr addrspace(1) [[OUT]], align 1
+; DCE-CHECK-NEXT: ret void
+;
+ %c = trunc i32 %v to i1
+ %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
+ %ballot_ne_zero = icmp ne i64 %ballot, 0
+ store i1 %ballot_ne_zero, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @test_readlane_i16(i16 %src0, i32 %src1) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @test_readlane_i16(
+; CURRENT-CHECK-SAME: i16 [[SRC0:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR3:[0-9]+]] {
+; CURRENT-CHECK-NEXT: tail call void asm sideeffect "
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @test_readlane_i16(
+; PASS-CHECK-SAME: i16 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: call void asm sideeffect "
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @test_readlane_i16(
+; DCE-CHECK-SAME: i16 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: call void asm sideeffect "
+; DCE-CHECK-NEXT: ret void
+;
+ %readlane = call i16 @llvm.amdgcn.readlane.i16(i16 %src0, i32 %src1)
+ call void asm sideeffect "; use $0", "s"(i16 %readlane)
+ ret void
+}
+
+define amdgpu_kernel void @test_readlane_i64(i64 %src0, i32 %src1) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @test_readlane_i64(
+; CURRENT-CHECK-SAME: i64 [[SRC0:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CURRENT-CHECK-NEXT: tail call void asm sideeffect "
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @test_readlane_i64(
+; PASS-CHECK-SAME: i64 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: call void asm sideeffect "
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @test_readlane_i64(
+; DCE-CHECK-SAME: i64 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: call void asm sideeffect "
+; DCE-CHECK-NEXT: ret void
+;
+ %readlane = call i64 @llvm.amdgcn.readlane.i64(i64 %src0, i32 %src1)
+ call void asm sideeffect "; use $0", "s"(i64 %readlane)
+ ret void
+}
+
+define amdgpu_kernel void @test_readlane_bf16(bfloat %src0, i32 %src1) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @test_readlane_bf16(
+; CURRENT-CHECK-SAME: bfloat [[SRC0:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CURRENT-CHECK-NEXT: tail call void asm sideeffect "
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @test_readlane_bf16(
+; PASS-CHECK-SAME: bfloat [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: call void asm sideeffect "
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @test_readlane_bf16(
+; DCE-CHECK-SAME: bfloat [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: call void asm sideeffect "
+; DCE-CHECK-NEXT: ret void
+;
+ %readlane = call bfloat @llvm.amdgcn.readlane.bf16(bfloat %src0, i32 %src1)
+ call void asm sideeffect "; use $0", "s"(bfloat %readlane)
+ ret void
+}
+
+define amdgpu_kernel void @test_readlane_f16(half %src0, i32 %src1) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f16(
+; CURRENT-CHECK-SAME: half [[SRC0:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CURRENT-CHECK-NEXT: tail call void asm sideeffect "
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f16(
+; PASS-CHECK-SAME: half [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: call void asm sideeffect "
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f16(
+; DCE-CHECK-SAME: half [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: call void asm sideeffect "
+; DCE-CHECK-NEXT: ret void
+;
+ %readlane = call half @llvm.amdgcn.readlane.f16(half %src0, i32 %src1)
+ call void asm sideeffect "; use $0", "s"(half %readlane)
+ ret void
+}
+
+define amdgpu_kernel void @test_readlane_f32(float %src0, i32 %src1) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f32(
+; CURRENT-CHECK-SAME: float [[SRC0:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CURRENT-CHECK-NEXT: tail call void asm sideeffect "
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f32(
+; PASS-CHECK-SAME: float [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: call void asm sideeffect "
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f32(
+; DCE-CHECK-SAME: float [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: call void asm sideeffect "
+; DCE-CHECK-NEXT: ret void
+;
+ %readlane = call float @llvm.amdgcn.readlane.f32(float %src0, i32 %src1)
+ call void asm sideeffect "; use $0", "s"(float %readlane)
+ ret void
+}
+
+define amdgpu_kernel void @test_readlane_f64(double %src0, i32 %src1) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f64(
+; CURRENT-CHECK-SAME: double [[SRC0:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CURRENT-CHECK-NEXT: tail call void asm sideeffect "
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f64(
+; PASS-CHECK-SAME: double [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: call void asm sideeffect "
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f64(
+; DCE-CHECK-SAME: double [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: call void asm sideeffect "
+; DCE-CHECK-NEXT: ret void
+;
+ %readlane = call double @llvm.amdgcn.readlane.f64(double %src0, i32 %src1)
+ call void asm sideeffect "; use $0", "s"(double %readlane)
+ ret void
+}
+; All such cases could be optimized, given a generic way to query getDeclarationIfExists().
+define void @test_readlane_v8i16(ptr addrspace(1) %out, <8 x i16> %src, i32 %src1) {
+; CURRENT-CHECK-LABEL: define void @test_readlane_v8i16(
+; CURRENT-CHECK-SAME: ptr addrspace(1) readnone captures(none) [[OUT:%.*]], <8 x i16> [[SRC:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CURRENT-CHECK-NEXT: [[X:%.*]] = tail call <8 x i16> @llvm.amdgcn.readlane.v8i16(<8 x i16> [[SRC]], i32 [[SRC1]])
+; CURRENT-CHECK-NEXT: tail call void asm sideeffect "
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define void @test_readlane_v8i16(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], <8 x i16> [[SRC:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[X:%.*]] = call <8 x i16> @llvm.amdgcn.readlane.v8i16(<8 x i16> [[SRC]], i32 [[SRC1]])
+; PASS-CHECK-NEXT: call void asm sideeffect "
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define void @test_readlane_v8i16(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], <8 x i16> [[SRC:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[X:%.*]] = call <8 x i16> @llvm.amdgcn.readlane.v8i16(<8 x i16> [[SRC]], i32 [[SRC1]])
+; DCE-CHECK-NEXT: call void asm sideeffect "
+; DCE-CHECK-NEXT: ret void
+;
+ %x = call <8 x i16> @llvm.amdgcn.readlane.v8i16(<8 x i16> %src, i32 %src1)
+ call void asm sideeffect "; use $0", "s"(<8 x i16> %x)
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll
new file mode 100644
index 0000000..2fde3e3
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll
@@ -0,0 +1,57 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine,instcombine,early-cse,simplifycfg -S < %s | FileCheck %s -check-prefix=COMB-CHECK
+
+; This should not be optimized: %uni.inc is uniform inside the loop, but the loop
+; exits on a divergent condition, so its value at the use in %X is temporally
+; divergent and the readfirstlane must be preserved.
+define amdgpu_cs void @temporal_divergence(ptr addrspace(1) %out, i32 %n) {
+; PASS-CHECK-LABEL: define amdgpu_cs void @temporal_divergence(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; PASS-CHECK-NEXT: [[ENTRY:.*]]:
+; PASS-CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: br label %[[H:.*]]
+; PASS-CHECK: [[H]]:
+; PASS-CHECK-NEXT: [[UNI_MERGE_H:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[UNI_INC:%.*]], %[[H]] ]
+; PASS-CHECK-NEXT: [[UNI_INC]] = add i32 [[UNI_MERGE_H]], 1
+; PASS-CHECK-NEXT: [[DIV_EXITX:%.*]] = icmp eq i32 [[TID]], 0
+; PASS-CHECK-NEXT: br i1 [[DIV_EXITX]], label %[[X:.*]], label %[[H]]
+; PASS-CHECK: [[X]]:
+; PASS-CHECK-NEXT: [[UNI_JOIN:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[UNI_INC]])
+; PASS-CHECK-NEXT: [[JOIN_USER:%.*]] = add i32 [[UNI_JOIN]], 5
+; PASS-CHECK-NEXT: store i32 [[JOIN_USER]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; COMB-CHECK-LABEL: define amdgpu_cs void @temporal_divergence(
+; COMB-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; COMB-CHECK-NEXT: [[ENTRY:.*]]:
+; COMB-CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; COMB-CHECK-NEXT: br label %[[H:.*]]
+; COMB-CHECK: [[H]]:
+; COMB-CHECK-NEXT: [[UNI_MERGE_H:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[UNI_INC:%.*]], %[[H]] ]
+; COMB-CHECK-NEXT: [[UNI_INC]] = add i32 [[UNI_MERGE_H]], 1
+; COMB-CHECK-NEXT: [[DIV_EXITX:%.*]] = icmp eq i32 [[TID]], 0
+; COMB-CHECK-NEXT: br i1 [[DIV_EXITX]], label %[[X:.*]], label %[[H]]
+; COMB-CHECK: [[X]]:
+; COMB-CHECK-NEXT: [[UNI_JOIN:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[UNI_INC]])
+; COMB-CHECK-NEXT: [[JOIN_USER:%.*]] = add i32 [[UNI_JOIN]], 5
+; COMB-CHECK-NEXT: store i32 [[JOIN_USER]], ptr addrspace(1) [[OUT]], align 4
+; COMB-CHECK-NEXT: ret void
+;
+entry:
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ br label %H
+
+H:
+ %uni.merge.h = phi i32 [ 0, %entry ], [ %uni.inc, %H ]
+ %uni.inc = add i32 %uni.merge.h, 1
+ %div.exitx = icmp eq i32 %tid, 0
+ br i1 %div.exitx, label %X, label %H ; divergent branch
+
+X:
+ %uni.join = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %uni.inc)
+ %join.user = add i32 %uni.join, 5
+ store i32 %join.user, ptr addrspace(1) %out
+ ret void
+}
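+
+; For contrast, a minimal sketch (hypothetical, not covered by the autogenerated
+; CHECK lines above) of a case the combine may fold: the readfirstlane operand is
+; a kernel argument, uniform on every path, so the call can be replaced by %n.
+define amdgpu_cs void @no_temporal_divergence(ptr addrspace(1) %out, i32 %n) {
+entry:
+  %uni = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %n)
+  store i32 %uni, ptr addrspace(1) %out
+  ret void
+}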
+
+declare i32 @llvm.amdgcn.workitem.id.x()
+declare i32 @llvm.amdgcn.readfirstlane.i32(i32)
diff --git a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
index 2ae6fc2..4a6fa4f 100644
--- a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
+++ b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
@@ -691,7 +691,8 @@ define amdgpu_kernel void @uaddo32_vcc_user(ptr addrspace(1) %out, ptr addrspace
; GCN-ISEL-LABEL: name: suaddo64
; GCN-ISEL-LABEL: body:
; GCN-ISEL-LABEL: bb.0
-; GCN-ISEL: S_ADD_U64_PSEUDO
+; GCN-ISEL: S_UADDO_PSEUDO
+; GCN-ISEL: S_ADD_CO_PSEUDO
define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a, i64 %b) #0 {
; CISI-LABEL: suaddo64:
@@ -700,21 +701,23 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; CISI-NEXT: s_mov_b32 s11, 0xf000
; CISI-NEXT: s_mov_b32 s10, -1
; CISI-NEXT: s_waitcnt lgkmcnt(0)
-; CISI-NEXT: s_add_u32 s6, s4, s6
-; CISI-NEXT: v_mov_b32_e32 v0, s4
-; CISI-NEXT: s_addc_u32 s7, s5, s7
-; CISI-NEXT: v_mov_b32_e32 v1, s5
-; CISI-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; CISI-NEXT: v_mov_b32_e32 v2, s6
+; CISI-NEXT: s_add_u32 s4, s4, s6
+; CISI-NEXT: s_cselect_b64 s[12:13], -1, 0
+; CISI-NEXT: s_or_b32 s6, s12, s13
+; CISI-NEXT: s_cmp_lg_u32 s6, 0
+; CISI-NEXT: s_addc_u32 s5, s5, s7
; CISI-NEXT: s_mov_b32 s8, s0
; CISI-NEXT: s_mov_b32 s9, s1
+; CISI-NEXT: v_mov_b32_e32 v0, s4
+; CISI-NEXT: v_mov_b32_e32 v1, s5
+; CISI-NEXT: s_cselect_b64 s[4:5], -1, 0
; CISI-NEXT: s_mov_b32 s0, s2
; CISI-NEXT: s_mov_b32 s1, s3
; CISI-NEXT: s_mov_b32 s2, s10
; CISI-NEXT: s_mov_b32 s3, s11
-; CISI-NEXT: v_mov_b32_e32 v3, s7
-; CISI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; CISI-NEXT: buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; CISI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; CISI-NEXT: s_waitcnt expcnt(0)
+; CISI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; CISI-NEXT: buffer_store_byte v0, off, s[0:3], 0
; CISI-NEXT: s_endpgm
;
@@ -722,37 +725,37 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: s_add_u32 s2, s4, s6
; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: s_add_u32 s0, s4, s6
-; VI-NEXT: v_mov_b32_e32 v4, s4
; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: s_addc_u32 s1, s5, s7
-; VI-NEXT: v_mov_b32_e32 v5, s5
-; VI-NEXT: v_mov_b32_e32 v7, s1
-; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[4:5]
-; VI-NEXT: v_mov_b32_e32 v6, s0
-; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT: s_cmp_lg_u64 s[0:1], 0
+; VI-NEXT: s_addc_u32 s0, s5, s7
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_mov_b32_e32 v5, s0
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
; VI-NEXT: v_mov_b32_e32 v3, s3
-; VI-NEXT: flat_store_dwordx2 v[0:1], v[6:7]
-; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; VI-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
+; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; VI-NEXT: flat_store_byte v[2:3], v0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: suaddo64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_add_u32 s0, s12, s14
-; GFX9-NEXT: v_mov_b32_e32 v0, s12
-; GFX9-NEXT: v_mov_b32_e32 v1, s13
-; GFX9-NEXT: s_addc_u32 s1, s13, s15
-; GFX9-NEXT: v_mov_b32_e32 v3, s1
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v2, s0
-; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT: global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX9-NEXT: global_store_byte v4, v0, s[10:11]
+; GFX9-NEXT: s_add_u32 s2, s12, s14
+; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX9-NEXT: s_addc_u32 s0, s13, s15
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[0:1]
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX9-NEXT: global_store_byte v2, v3, s[10:11]
; GFX9-NEXT: s_endpgm
;
; GFX1010-LABEL: suaddo64:
@@ -761,10 +764,12 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1010-NEXT: v_mov_b32_e32 v2, 0
; GFX1010-NEXT: s_waitcnt lgkmcnt(0)
; GFX1010-NEXT: s_add_u32 s0, s12, s14
-; GFX1010-NEXT: s_addc_u32 s1, s13, s15
+; GFX1010-NEXT: s_cselect_b32 s1, -1, 0
; GFX1010-NEXT: v_mov_b32_e32 v0, s0
+; GFX1010-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1010-NEXT: s_addc_u32 s1, s13, s15
+; GFX1010-NEXT: s_cselect_b32 s0, -1, 0
; GFX1010-NEXT: v_mov_b32_e32 v1, s1
-; GFX1010-NEXT: v_cmp_lt_u64_e64 s0, s[0:1], s[12:13]
; GFX1010-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
; GFX1010-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
; GFX1010-NEXT: global_store_byte v2, v3, s[10:11]
@@ -775,11 +780,13 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1030W32-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX1030W32-NEXT: v_mov_b32_e32 v2, 0
; GFX1030W32-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1030W32-NEXT: s_add_u32 s6, s4, s6
-; GFX1030W32-NEXT: s_addc_u32 s7, s5, s7
-; GFX1030W32-NEXT: v_mov_b32_e32 v0, s6
-; GFX1030W32-NEXT: v_cmp_lt_u64_e64 s4, s[6:7], s[4:5]
-; GFX1030W32-NEXT: v_mov_b32_e32 v1, s7
+; GFX1030W32-NEXT: s_add_u32 s4, s4, s6
+; GFX1030W32-NEXT: s_cselect_b32 s6, -1, 0
+; GFX1030W32-NEXT: v_mov_b32_e32 v0, s4
+; GFX1030W32-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1030W32-NEXT: s_addc_u32 s5, s5, s7
+; GFX1030W32-NEXT: s_cselect_b32 s4, -1, 0
+; GFX1030W32-NEXT: v_mov_b32_e32 v1, s5
; GFX1030W32-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX1030W32-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1030W32-NEXT: global_store_byte v2, v3, s[2:3]
@@ -790,11 +797,13 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1030W64-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX1030W64-NEXT: v_mov_b32_e32 v2, 0
; GFX1030W64-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1030W64-NEXT: s_add_u32 s6, s4, s6
-; GFX1030W64-NEXT: s_addc_u32 s7, s5, s7
-; GFX1030W64-NEXT: v_mov_b32_e32 v0, s6
-; GFX1030W64-NEXT: v_cmp_lt_u64_e64 s[4:5], s[6:7], s[4:5]
-; GFX1030W64-NEXT: v_mov_b32_e32 v1, s7
+; GFX1030W64-NEXT: s_add_u32 s4, s4, s6
+; GFX1030W64-NEXT: s_cselect_b64 s[8:9], -1, 0
+; GFX1030W64-NEXT: v_mov_b32_e32 v0, s4
+; GFX1030W64-NEXT: s_cmp_lg_u64 s[8:9], 0
+; GFX1030W64-NEXT: s_addc_u32 s5, s5, s7
+; GFX1030W64-NEXT: v_mov_b32_e32 v1, s5
+; GFX1030W64-NEXT: s_cselect_b64 s[4:5], -1, 0
; GFX1030W64-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[4:5]
; GFX1030W64-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1030W64-NEXT: global_store_byte v2, v3, s[2:3]
@@ -804,12 +813,13 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_add_u32 s6, s4, s6
-; GFX11-NEXT: s_addc_u32 s7, s5, s7
-; GFX11-NEXT: v_mov_b32_e32 v0, s6
-; GFX11-NEXT: v_cmp_lt_u64_e64 s4, s[6:7], s[4:5]
-; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: s_add_u32 s4, s4, s6
+; GFX11-NEXT: s_cselect_b32 s6, -1, 0
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
+; GFX11-NEXT: s_cmp_lg_u32 s6, 0
+; GFX11-NEXT: s_addc_u32 s5, s5, s7
+; GFX11-NEXT: s_cselect_b32 s4, -1, 0
+; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -819,12 +829,14 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1250-LABEL: suaddo64:
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
-; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_add_nc_u64 s[0:1], s[12:13], s[14:15]
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
-; GFX1250-NEXT: v_cmp_lt_u64_e64 s0, s[0:1], s[12:13]
+; GFX1250-NEXT: s_add_co_u32 s0, s12, s14
+; GFX1250-NEXT: s_cselect_b32 s1, -1, 0
+; GFX1250-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v0, s0
+; GFX1250-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1250-NEXT: s_add_co_ci_u32 s1, s13, s15
+; GFX1250-NEXT: s_cselect_b32 s0, -1, 0
+; GFX1250-NEXT: v_mov_b32_e32 v1, s1
; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[8:9]
@@ -841,7 +853,8 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GCN-ISEL-LABEL: name: vuaddo64
; GCN-ISEL-LABEL: body:
; GCN-ISEL-LABEL: bb.0
-; GCN-ISEL: V_ADD_U64_PSEUDO
+; GCN-ISEL: V_ADD_CO_U32_e64
+; GCN-ISEL: V_ADDC_U32_e64
define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a) #0 {
; CISI-LABEL: vuaddo64:
@@ -854,9 +867,8 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; CISI-NEXT: s_mov_b32 s4, s0
; CISI-NEXT: v_mov_b32_e32 v1, s9
; CISI-NEXT: v_add_i32_e32 v0, vcc, s8, v0
-; CISI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; CISI-NEXT: v_cmp_gt_u64_e32 vcc, s[8:9], v[0:1]
; CISI-NEXT: s_mov_b32 s5, s1
+; CISI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; CISI-NEXT: s_mov_b32 s0, s2
; CISI-NEXT: s_mov_b32 s1, s3
; CISI-NEXT: s_mov_b32 s2, s6
@@ -876,7 +888,6 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; VI-NEXT: v_mov_b32_e32 v6, s5
; VI-NEXT: v_add_u32_e32 v5, vcc, s4, v0
; VI-NEXT: v_addc_u32_e32 v6, vcc, 0, v6, vcc
-; VI-NEXT: v_cmp_gt_u64_e32 vcc, s[4:5], v[5:6]
; VI-NEXT: v_mov_b32_e32 v2, s1
; VI-NEXT: v_mov_b32_e32 v3, s2
; VI-NEXT: v_mov_b32_e32 v4, s3
@@ -894,7 +905,6 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s6, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, s[6:7], v[0:1]
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; GFX9-NEXT: global_store_byte v2, v0, s[2:3]
@@ -909,8 +919,7 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1010-NEXT: s_waitcnt lgkmcnt(0)
; GFX1010-NEXT: v_add_co_u32 v0, s4, s6, v0
; GFX1010-NEXT: v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
-; GFX1010-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX1010-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX1010-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX1010-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1010-NEXT: global_store_byte v2, v3, s[2:3]
; GFX1010-NEXT: s_endpgm
@@ -923,9 +932,8 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1030W32-NEXT: v_mov_b32_e32 v2, 0
; GFX1030W32-NEXT: s_waitcnt lgkmcnt(0)
; GFX1030W32-NEXT: v_add_co_u32 v0, s4, s6, v0
-; GFX1030W32-NEXT: v_add_co_ci_u32_e64 v1, null, s7, 0, s4
-; GFX1030W32-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX1030W32-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX1030W32-NEXT: v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX1030W32-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX1030W32-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1030W32-NEXT: global_store_byte v2, v3, s[2:3]
; GFX1030W32-NEXT: s_endpgm
@@ -938,9 +946,8 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1030W64-NEXT: v_mov_b32_e32 v2, 0
; GFX1030W64-NEXT: s_waitcnt lgkmcnt(0)
; GFX1030W64-NEXT: v_add_co_u32 v0, s[4:5], s6, v0
-; GFX1030W64-NEXT: v_add_co_ci_u32_e64 v1, null, s7, 0, s[4:5]
-; GFX1030W64-NEXT: v_cmp_gt_u64_e32 vcc, s[6:7], v[0:1]
-; GFX1030W64-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX1030W64-NEXT: v_add_co_ci_u32_e64 v1, s[4:5], s7, 0, s[4:5]
+; GFX1030W64-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[4:5]
; GFX1030W64-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1030W64-NEXT: global_store_byte v2, v3, s[2:3]
; GFX1030W64-NEXT: s_endpgm
@@ -955,10 +962,9 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_u32 v0, s4, s6, v0
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s7, 0, s4
+; GFX11-NEXT: v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: global_store_b8 v2, v3, s[2:3]
@@ -969,16 +975,17 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], s[6:7], v[0:1]
-; GFX1250-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[2:3]
-; GFX1250-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_add_co_u32 v0, s4, s6, v0
+; GFX1250-NEXT: v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX1250-NEXT: s_clause 0x1
-; GFX1250-NEXT: global_store_b64 v1, v[2:3], s[0:1]
-; GFX1250-NEXT: global_store_b8 v1, v0, s[2:3]
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: global_store_b8 v2, v3, s[2:3]
; GFX1250-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -1671,7 +1678,8 @@ define amdgpu_kernel void @usubo32_vcc_user(ptr addrspace(1) %out, ptr addrspace
; GCN-ISEL-LABEL: name: susubo64
; GCN-ISEL-LABEL: body:
; GCN-ISEL-LABEL: bb.0
-; GCN-ISEL: S_SUB_U64_PSEUDO
+; GCN-ISEL: S_USUBO_PSEUDO
+; GCN-ISEL: S_SUB_CO_PSEUDO
define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a, i64 %b) #0 {
; CISI-LABEL: susubo64:
@@ -1680,21 +1688,23 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; CISI-NEXT: s_mov_b32 s11, 0xf000
; CISI-NEXT: s_mov_b32 s10, -1
; CISI-NEXT: s_waitcnt lgkmcnt(0)
-; CISI-NEXT: s_sub_u32 s6, s4, s6
-; CISI-NEXT: v_mov_b32_e32 v0, s4
-; CISI-NEXT: s_subb_u32 s7, s5, s7
-; CISI-NEXT: v_mov_b32_e32 v1, s5
-; CISI-NEXT: v_cmp_gt_u64_e32 vcc, s[6:7], v[0:1]
-; CISI-NEXT: v_mov_b32_e32 v2, s6
+; CISI-NEXT: s_sub_u32 s4, s4, s6
+; CISI-NEXT: s_cselect_b64 s[12:13], -1, 0
+; CISI-NEXT: s_or_b32 s6, s12, s13
+; CISI-NEXT: s_cmp_lg_u32 s6, 0
+; CISI-NEXT: s_subb_u32 s5, s5, s7
; CISI-NEXT: s_mov_b32 s8, s0
; CISI-NEXT: s_mov_b32 s9, s1
+; CISI-NEXT: v_mov_b32_e32 v0, s4
+; CISI-NEXT: v_mov_b32_e32 v1, s5
+; CISI-NEXT: s_cselect_b64 s[4:5], -1, 0
; CISI-NEXT: s_mov_b32 s0, s2
; CISI-NEXT: s_mov_b32 s1, s3
; CISI-NEXT: s_mov_b32 s2, s10
; CISI-NEXT: s_mov_b32 s3, s11
-; CISI-NEXT: v_mov_b32_e32 v3, s7
-; CISI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; CISI-NEXT: buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; CISI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; CISI-NEXT: s_waitcnt expcnt(0)
+; CISI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; CISI-NEXT: buffer_store_byte v0, off, s[0:3], 0
; CISI-NEXT: s_endpgm
;
@@ -1702,37 +1712,37 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: s_sub_u32 s2, s4, s6
; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: s_sub_u32 s0, s4, s6
-; VI-NEXT: v_mov_b32_e32 v4, s4
; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: s_subb_u32 s1, s5, s7
-; VI-NEXT: v_mov_b32_e32 v5, s5
-; VI-NEXT: v_mov_b32_e32 v7, s1
-; VI-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[4:5]
-; VI-NEXT: v_mov_b32_e32 v6, s0
-; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT: s_cmp_lg_u64 s[0:1], 0
+; VI-NEXT: s_subb_u32 s0, s5, s7
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_mov_b32_e32 v5, s0
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
; VI-NEXT: v_mov_b32_e32 v3, s3
-; VI-NEXT: flat_store_dwordx2 v[0:1], v[6:7]
-; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; VI-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
+; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; VI-NEXT: flat_store_byte v[2:3], v0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: susubo64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_sub_u32 s0, s12, s14
-; GFX9-NEXT: v_mov_b32_e32 v0, s12
-; GFX9-NEXT: v_mov_b32_e32 v1, s13
-; GFX9-NEXT: s_subb_u32 s1, s13, s15
-; GFX9-NEXT: v_mov_b32_e32 v3, s1
-; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v2, s0
-; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT: global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX9-NEXT: global_store_byte v4, v0, s[10:11]
+; GFX9-NEXT: s_sub_u32 s2, s12, s14
+; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX9-NEXT: s_subb_u32 s0, s13, s15
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[0:1]
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX9-NEXT: global_store_byte v2, v3, s[10:11]
; GFX9-NEXT: s_endpgm
;
; GFX1010-LABEL: susubo64:
@@ -1741,10 +1751,12 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1010-NEXT: v_mov_b32_e32 v2, 0
; GFX1010-NEXT: s_waitcnt lgkmcnt(0)
; GFX1010-NEXT: s_sub_u32 s0, s12, s14
-; GFX1010-NEXT: s_subb_u32 s1, s13, s15
+; GFX1010-NEXT: s_cselect_b32 s1, -1, 0
; GFX1010-NEXT: v_mov_b32_e32 v0, s0
+; GFX1010-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1010-NEXT: s_subb_u32 s1, s13, s15
+; GFX1010-NEXT: s_cselect_b32 s0, -1, 0
; GFX1010-NEXT: v_mov_b32_e32 v1, s1
-; GFX1010-NEXT: v_cmp_gt_u64_e64 s0, s[0:1], s[12:13]
; GFX1010-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
; GFX1010-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
; GFX1010-NEXT: global_store_byte v2, v3, s[10:11]
@@ -1755,11 +1767,13 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1030W32-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX1030W32-NEXT: v_mov_b32_e32 v2, 0
; GFX1030W32-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1030W32-NEXT: s_sub_u32 s6, s4, s6
-; GFX1030W32-NEXT: s_subb_u32 s7, s5, s7
-; GFX1030W32-NEXT: v_mov_b32_e32 v0, s6
-; GFX1030W32-NEXT: v_cmp_gt_u64_e64 s4, s[6:7], s[4:5]
-; GFX1030W32-NEXT: v_mov_b32_e32 v1, s7
+; GFX1030W32-NEXT: s_sub_u32 s4, s4, s6
+; GFX1030W32-NEXT: s_cselect_b32 s6, -1, 0
+; GFX1030W32-NEXT: v_mov_b32_e32 v0, s4
+; GFX1030W32-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1030W32-NEXT: s_subb_u32 s5, s5, s7
+; GFX1030W32-NEXT: s_cselect_b32 s4, -1, 0
+; GFX1030W32-NEXT: v_mov_b32_e32 v1, s5
; GFX1030W32-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX1030W32-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1030W32-NEXT: global_store_byte v2, v3, s[2:3]
@@ -1770,11 +1784,13 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1030W64-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX1030W64-NEXT: v_mov_b32_e32 v2, 0
; GFX1030W64-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1030W64-NEXT: s_sub_u32 s6, s4, s6
-; GFX1030W64-NEXT: s_subb_u32 s7, s5, s7
-; GFX1030W64-NEXT: v_mov_b32_e32 v0, s6
-; GFX1030W64-NEXT: v_cmp_gt_u64_e64 s[4:5], s[6:7], s[4:5]
-; GFX1030W64-NEXT: v_mov_b32_e32 v1, s7
+; GFX1030W64-NEXT: s_sub_u32 s4, s4, s6
+; GFX1030W64-NEXT: s_cselect_b64 s[8:9], -1, 0
+; GFX1030W64-NEXT: v_mov_b32_e32 v0, s4
+; GFX1030W64-NEXT: s_cmp_lg_u64 s[8:9], 0
+; GFX1030W64-NEXT: s_subb_u32 s5, s5, s7
+; GFX1030W64-NEXT: v_mov_b32_e32 v1, s5
+; GFX1030W64-NEXT: s_cselect_b64 s[4:5], -1, 0
; GFX1030W64-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[4:5]
; GFX1030W64-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1030W64-NEXT: global_store_byte v2, v3, s[2:3]
@@ -1784,12 +1800,13 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_sub_u32 s6, s4, s6
-; GFX11-NEXT: s_subb_u32 s7, s5, s7
-; GFX11-NEXT: v_mov_b32_e32 v0, s6
-; GFX11-NEXT: v_cmp_gt_u64_e64 s4, s[6:7], s[4:5]
-; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: s_sub_u32 s4, s4, s6
+; GFX11-NEXT: s_cselect_b32 s6, -1, 0
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
+; GFX11-NEXT: s_cmp_lg_u32 s6, 0
+; GFX11-NEXT: s_subb_u32 s5, s5, s7
+; GFX11-NEXT: s_cselect_b32 s4, -1, 0
+; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -1799,12 +1816,14 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1250-LABEL: susubo64:
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
-; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_sub_nc_u64 s[0:1], s[12:13], s[14:15]
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
-; GFX1250-NEXT: v_cmp_gt_u64_e64 s0, s[0:1], s[12:13]
+; GFX1250-NEXT: s_sub_co_u32 s0, s12, s14
+; GFX1250-NEXT: s_cselect_b32 s1, -1, 0
+; GFX1250-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v0, s0
+; GFX1250-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1250-NEXT: s_sub_co_ci_u32 s1, s13, s15
+; GFX1250-NEXT: s_cselect_b32 s0, -1, 0
+; GFX1250-NEXT: v_mov_b32_e32 v1, s1
; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[8:9]
@@ -1821,7 +1840,8 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GCN-ISEL-LABEL: name: vusubo64
; GCN-ISEL-LABEL: body:
; GCN-ISEL-LABEL: bb.0
-; GCN-ISEL: V_SUB_U64_PSEUDO
+; GCN-ISEL: V_SUB_CO_U32_e64
+; GCN-ISEL: V_SUBB_U32_e64
define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a) #0 {
; CISI-LABEL: vusubo64:
@@ -1834,9 +1854,8 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; CISI-NEXT: s_mov_b32 s4, s0
; CISI-NEXT: v_mov_b32_e32 v1, s9
; CISI-NEXT: v_sub_i32_e32 v0, vcc, s8, v0
-; CISI-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
-; CISI-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[0:1]
; CISI-NEXT: s_mov_b32 s5, s1
+; CISI-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
; CISI-NEXT: s_mov_b32 s0, s2
; CISI-NEXT: s_mov_b32 s1, s3
; CISI-NEXT: s_mov_b32 s2, s6
@@ -1856,7 +1875,6 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; VI-NEXT: v_mov_b32_e32 v6, s5
; VI-NEXT: v_sub_u32_e32 v5, vcc, s4, v0
; VI-NEXT: v_subbrev_u32_e32 v6, vcc, 0, v6, vcc
-; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[5:6]
; VI-NEXT: v_mov_b32_e32 v2, s1
; VI-NEXT: v_mov_b32_e32 v3, s2
; VI-NEXT: v_mov_b32_e32 v4, s3
@@ -1874,7 +1892,6 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, s6, v0
; GFX9-NEXT: v_subbrev_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; GFX9-NEXT: global_store_byte v2, v0, s[2:3]
@@ -1889,8 +1906,7 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1010-NEXT: s_waitcnt lgkmcnt(0)
; GFX1010-NEXT: v_sub_co_u32 v0, s4, s6, v0
; GFX1010-NEXT: v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
-; GFX1010-NEXT: v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX1010-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX1010-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX1010-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1010-NEXT: global_store_byte v2, v3, s[2:3]
; GFX1010-NEXT: s_endpgm
@@ -1903,9 +1919,8 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1030W32-NEXT: v_mov_b32_e32 v2, 0
; GFX1030W32-NEXT: s_waitcnt lgkmcnt(0)
; GFX1030W32-NEXT: v_sub_co_u32 v0, s4, s6, v0
-; GFX1030W32-NEXT: v_sub_co_ci_u32_e64 v1, null, s7, 0, s4
-; GFX1030W32-NEXT: v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX1030W32-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX1030W32-NEXT: v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX1030W32-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX1030W32-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1030W32-NEXT: global_store_byte v2, v3, s[2:3]
; GFX1030W32-NEXT: s_endpgm
@@ -1918,9 +1933,8 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1030W64-NEXT: v_mov_b32_e32 v2, 0
; GFX1030W64-NEXT: s_waitcnt lgkmcnt(0)
; GFX1030W64-NEXT: v_sub_co_u32 v0, s[4:5], s6, v0
-; GFX1030W64-NEXT: v_sub_co_ci_u32_e64 v1, null, s7, 0, s[4:5]
-; GFX1030W64-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; GFX1030W64-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX1030W64-NEXT: v_sub_co_ci_u32_e64 v1, s[4:5], s7, 0, s[4:5]
+; GFX1030W64-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[4:5]
; GFX1030W64-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1030W64-NEXT: global_store_byte v2, v3, s[2:3]
; GFX1030W64-NEXT: s_endpgm
@@ -1935,10 +1949,9 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_sub_co_u32 v0, s4, s6, v0
-; GFX11-NEXT: v_sub_co_ci_u32_e64 v1, null, s7, 0, s4
+; GFX11-NEXT: v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: global_store_b8 v2, v3, s[2:3]
@@ -1949,16 +1962,17 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_sub_nc_u64_e32 v[2:3], s[6:7], v[0:1]
-; GFX1250-NEXT: v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[2:3]
-; GFX1250-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_sub_co_u32 v0, s4, s6, v0
+; GFX1250-NEXT: v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX1250-NEXT: s_clause 0x1
-; GFX1250-NEXT: global_store_b64 v1, v[2:3], s[0:1]
-; GFX1250-NEXT: global_store_b8 v1, v0, s[2:3]
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: global_store_b8 v2, v3, s[2:3]
; GFX1250-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
diff --git a/llvm/test/CodeGen/AMDGPU/mad_int24.ll b/llvm/test/CodeGen/AMDGPU/mad_int24.ll
index 93fda94..dd88310 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_int24.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_int24.ll
@@ -1,17 +1,79 @@
-; RUN: llc < %s -mtriple=amdgcn | FileCheck %s --check-prefix=GCN --check-prefix=FUNC
-; RUN: llc < %s -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global | FileCheck %s --check-prefix=GCN --check-prefix=FUNC
-; RUN: llc < %s -mtriple=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
-; RUN: llc < %s -mtriple=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM --check-prefix=FUNC
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=amdgcn | FileCheck %s --check-prefixes=GCN
+; RUN: llc < %s -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global | FileCheck %s --check-prefixes=VI
+; RUN: llc < %s -mtriple=r600 -mcpu=redwood | FileCheck %s --check-prefixes=EG,R600,RW
+; RUN: llc < %s -mtriple=r600 -mcpu=cayman | FileCheck %s --check-prefixes=EG,R600,CM
-; FUNC-LABEL: {{^}}i32_mad24:
; Signed 24-bit multiply is not supported on pre-Cayman GPUs.
-; EG: MULLO_INT
-; CM: MULLO_INT
-; GCN: s_bfe_i32
-; GCN: s_bfe_i32
-; GCN: s_mul_i32
-; GCN: s_add_i32
define amdgpu_kernel void @i32_mad24(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: i32_mad24:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0xb
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x9
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_bfe_i32 s0, s0, 0x180000
+; GCN-NEXT: s_bfe_i32 s1, s1, 0x180000
+; GCN-NEXT: s_mul_i32 s0, s0, s1
+; GCN-NEXT: s_add_i32 s0, s0, s2
+; GCN-NEXT: s_mov_b32 s6, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_endpgm
+;
+; VI-LABEL: i32_mad24:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x2c
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_bfe_i32 s0, s0, 0x180000
+; VI-NEXT: s_bfe_i32 s1, s1, 0x180000
+; VI-NEXT: s_mul_i32 s0, s0, s1
+; VI-NEXT: s_add_i32 s0, s0, s2
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; RW-LABEL: i32_mad24:
+; RW: ; %bb.0: ; %entry
+; RW-NEXT: ALU 9, @4, KC0[CB0:0-32], KC1[]
+; RW-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; RW-NEXT: CF_END
+; RW-NEXT: PAD
+; RW-NEXT: ALU clause starting at 4:
+; RW-NEXT: LSHL T0.W, KC0[2].Z, literal.x,
+; RW-NEXT: LSHL * T1.W, KC0[2].W, literal.x,
+; RW-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; RW-NEXT: ASHR T1.W, PS, literal.x,
+; RW-NEXT: ASHR * T0.W, PV.W, literal.x,
+; RW-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; RW-NEXT: MULLO_INT * T0.X, PS, PV.W,
+; RW-NEXT: ADD_INT T0.X, PS, KC0[3].X,
+; RW-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; RW-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; CM-LABEL: i32_mad24:
+; CM: ; %bb.0: ; %entry
+; CM-NEXT: ALU 12, @4, KC0[CB0:0-32], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T1.X
+; CM-NEXT: CF_END
+; CM-NEXT: PAD
+; CM-NEXT: ALU clause starting at 4:
+; CM-NEXT: LSHL T0.Z, KC0[2].Z, literal.x,
+; CM-NEXT: LSHL * T0.W, KC0[2].W, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: ASHR T1.Z, PV.W, literal.x,
+; CM-NEXT: ASHR * T0.W, PV.Z, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X, T0.W, T1.Z,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.W, T1.Z,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.W, T1.Z,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.W, T1.Z,
+; CM-NEXT: ADD_INT * T0.X, PV.X, KC0[3].X,
+; CM-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
entry:
%0 = shl i32 %a, 8
%a_24 = ashr i32 %0, 8
@@ -23,13 +85,25 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mad24_known_bits_destroyed:
-; GCN: s_waitcnt
-; GCN-NEXT: v_mad_i32_i24
-; GCN-NEXT: v_mul_i32_i24
-; GCN-NEXT: s_setpc_b64
define i32 @mad24_known_bits_destroyed(i32 %a, i32 %b, i32 %c) {
-
+; GCN-LABEL: mad24_known_bits_destroyed:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mad_i32_i24 v1, v0, v1, v2
+; GCN-NEXT: v_mul_i32_i24_e32 v0, v1, v0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: mad24_known_bits_destroyed:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_mad_i32_i24 v1, v0, v1, v2
+; VI-NEXT: v_mul_i32_i24_e32 v0, v1, v0
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; EG-LABEL: mad24_known_bits_destroyed:
+; EG: ; %bb.0:
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
%shl.0 = shl i32 %a, 8
%sra.0 = ashr i32 %shl.0, 8
%shl.1 = shl i32 %b, 8
@@ -48,12 +122,25 @@ define i32 @mad24_known_bits_destroyed(i32 %a, i32 %b, i32 %c) {
ret i32 %mul1
}
-; GCN-LABEL: {{^}}mad24_intrin_known_bits_destroyed:
-; GCN: s_waitcnt
-; GCN-NEXT: v_mad_i32_i24
-; GCN-NEXT: v_mul_i32_i24
-; GCN-NEXT: s_setpc_b64
define i32 @mad24_intrin_known_bits_destroyed(i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: mad24_intrin_known_bits_destroyed:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mad_i32_i24 v1, v0, v1, v2
+; GCN-NEXT: v_mul_i32_i24_e32 v0, v1, v0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: mad24_intrin_known_bits_destroyed:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_mad_i32_i24 v1, v0, v1, v2
+; VI-NEXT: v_mul_i32_i24_e32 v0, v1, v0
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; EG-LABEL: mad24_intrin_known_bits_destroyed:
+; EG: ; %bb.0:
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
%shl.0 = shl i32 %a, 8
%sra.0 = ashr i32 %shl.0, 8
%shl.1 = shl i32 %b, 8
@@ -73,17 +160,177 @@ define i32 @mad24_intrin_known_bits_destroyed(i32 %a, i32 %b, i32 %c) {
}
; Make sure no unnecessary BFEs are emitted in the loop.
-; GCN-LABEL: {{^}}mad24_destroyed_knownbits_2:
-; GCN-NOT: v_bfe
-; GCN: v_mad_i32_i24
-; GCN-NOT: v_bfe
-; GCN: v_mad_i32_i24
-; GCN-NOT: v_bfe
-; GCN: v_mad_i32_i24
-; GCN-NOT: v_bfe
-; GCN: v_mad_i32_i24
-; GCN-NOT: v_bfe
define void @mad24_destroyed_knownbits_2(i32 %arg, i32 %arg1, i32 %arg2, ptr addrspace(1) %arg3) {
+; GCN-LABEL: mad24_destroyed_knownbits_2:
+; GCN: ; %bb.0: ; %bb
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v5, 1
+; GCN-NEXT: s_mov_b64 s[4:5], 0
+; GCN-NEXT: .LBB3_1: ; %bb6
+; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN-NEXT: v_mad_i32_i24 v0, v0, v5, v5
+; GCN-NEXT: v_add_i32_e32 v1, vcc, -1, v1
+; GCN-NEXT: v_mad_i32_i24 v5, v0, v5, v0
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-NEXT: v_mad_i32_i24 v0, v5, v0, v5
+; GCN-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN-NEXT: v_mad_i32_i24 v0, v0, v5, v0
+; GCN-NEXT: v_mov_b32_e32 v5, v2
+; GCN-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN-NEXT: s_cbranch_execnz .LBB3_1
+; GCN-NEXT: ; %bb.2: ; %bb5
+; GCN-NEXT: s_or_b64 exec, exec, s[4:5]
+; GCN-NEXT: s_mov_b32 s6, 0
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_mov_b32 s4, s6
+; GCN-NEXT: s_mov_b32 s5, s6
+; GCN-NEXT: buffer_store_dword v0, v[3:4], s[4:7], 0 addr64
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: mad24_destroyed_knownbits_2:
+; VI: ; %bb.0: ; %bb
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v5, 1
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_1: ; %bb6
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_mad_i32_i24 v0, v0, v5, v5
+; VI-NEXT: v_mad_i32_i24 v5, v0, v5, v0
+; VI-NEXT: v_add_u32_e32 v1, vcc, -1, v1
+; VI-NEXT: v_mad_i32_i24 v0, v5, v0, v5
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; VI-NEXT: v_mad_i32_i24 v0, v0, v5, v0
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v5, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB3_1
+; VI-NEXT: ; %bb.2: ; %bb5
+; VI-NEXT: s_or_b64 exec, exec, s[4:5]
+; VI-NEXT: flat_store_dword v[3:4], v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; RW-LABEL: mad24_destroyed_knownbits_2:
+; RW: ; %bb.0: ; %bb
+; RW-NEXT: ALU 5, @10, KC0[CB0:0-32], KC1[]
+; RW-NEXT: LOOP_START_DX10 @7
+; RW-NEXT: ALU_PUSH_BEFORE 30, @16, KC0[], KC1[]
+; RW-NEXT: JUMP @6 POP:1
+; RW-NEXT: LOOP_BREAK @6
+; RW-NEXT: POP @6 POP:1
+; RW-NEXT: END_LOOP @2
+; RW-NEXT: ALU 1, @47, KC0[], KC1[]
+; RW-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; RW-NEXT: CF_END
+; RW-NEXT: ALU clause starting at 10:
+; RW-NEXT: MOV T0.X, KC0[2].Y,
+; RW-NEXT: MOV T0.Y, KC0[2].Z,
+; RW-NEXT: MOV * T0.Z, KC0[2].W,
+; RW-NEXT: MOV T0.W, KC0[3].X,
+; RW-NEXT: MOV * T1.W, literal.x,
+; RW-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; RW-NEXT: ALU clause starting at 16:
+; RW-NEXT: LSHL T2.W, T1.W, literal.x,
+; RW-NEXT: LSHL * T3.W, T0.X, literal.x,
+; RW-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; RW-NEXT: ASHR T3.W, PS, literal.x,
+; RW-NEXT: ASHR * T2.W, PV.W, literal.x,
+; RW-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; RW-NEXT: MULLO_INT * T0.X, PV.W, PS,
+; RW-NEXT: ADD_INT * T1.W, PS, T1.W,
+; RW-NEXT: LSHL * T3.W, PV.W, literal.x,
+; RW-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; RW-NEXT: ASHR * T3.W, PV.W, literal.x,
+; RW-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; RW-NEXT: MULLO_INT * T0.X, PV.W, T2.W,
+; RW-NEXT: ADD_INT * T1.W, PS, T1.W,
+; RW-NEXT: LSHL * T2.W, PV.W, literal.x,
+; RW-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; RW-NEXT: ASHR * T2.W, PV.W, literal.x,
+; RW-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; RW-NEXT: MULLO_INT * T0.X, PV.W, T3.W,
+; RW-NEXT: ADD_INT * T1.W, PS, T1.W,
+; RW-NEXT: LSHL * T3.W, PV.W, literal.x,
+; RW-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; RW-NEXT: ASHR * T3.W, PV.W, literal.x,
+; RW-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; RW-NEXT: ADD_INT T0.Y, T0.Y, literal.x,
+; RW-NEXT: MULLO_INT * T0.X, PV.W, T2.W,
+; RW-NEXT: -1(nan), 0(0.000000e+00)
+; RW-NEXT: ADD_INT T0.X, PS, T1.W,
+; RW-NEXT: SETE_INT T2.W, PV.Y, 0.0,
+; RW-NEXT: MOV * T1.W, T0.Z,
+; RW-NEXT: PRED_SETNE_INT * ExecMask,PredicateBit (MASKED), PV.W, 0.0,
+; RW-NEXT: ALU clause starting at 47:
+; RW-NEXT: LSHR * T1.X, T0.W, literal.x,
+; RW-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; CM-LABEL: mad24_destroyed_knownbits_2:
+; CM: ; %bb.0: ; %bb
+; CM-NEXT: ALU 5, @10, KC0[CB0:0-32], KC1[]
+; CM-NEXT: LOOP_START_DX10 @7
+; CM-NEXT: ALU_PUSH_BEFORE 41, @16, KC0[], KC1[]
+; CM-NEXT: JUMP @6 POP:1
+; CM-NEXT: LOOP_BREAK @6
+; CM-NEXT: POP @6 POP:1
+; CM-NEXT: END_LOOP @2
+; CM-NEXT: ALU 1, @58, KC0[], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T1.X, T0.X
+; CM-NEXT: CF_END
+; CM-NEXT: ALU clause starting at 10:
+; CM-NEXT: MOV * T1.X, KC0[2].Y,
+; CM-NEXT: MOV T0.X, KC0[2].Z,
+; CM-NEXT: MOV T0.Y, KC0[2].W,
+; CM-NEXT: MOV T0.Z, KC0[3].X,
+; CM-NEXT: MOV * T0.W, literal.x,
+; CM-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; CM-NEXT: ALU clause starting at 16:
+; CM-NEXT: LSHL T1.Z, T0.W, literal.x,
+; CM-NEXT: LSHL * T1.W, T1.X, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: ASHR T2.Z, PV.W, literal.x,
+; CM-NEXT: ASHR * T1.W, PV.Z, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T1.X, T2.Z, T1.W,
+; CM-NEXT: MULLO_INT T1.Y (MASKED), T2.Z, T1.W,
+; CM-NEXT: MULLO_INT T1.Z (MASKED), T2.Z, T1.W,
+; CM-NEXT: MULLO_INT * T1.W (MASKED), T2.Z, T1.W,
+; CM-NEXT: ADD_INT * T0.W, PV.X, T0.W,
+; CM-NEXT: LSHL * T2.W, PV.W, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: ASHR * T2.W, PV.W, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T1.X, T2.W, T1.W,
+; CM-NEXT: MULLO_INT T1.Y (MASKED), T2.W, T1.W,
+; CM-NEXT: MULLO_INT T1.Z (MASKED), T2.W, T1.W,
+; CM-NEXT: MULLO_INT * T1.W (MASKED), T2.W, T1.W,
+; CM-NEXT: ADD_INT * T0.W, PV.X, T0.W,
+; CM-NEXT: LSHL * T1.W, PV.W, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: ASHR * T1.W, PV.W, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T1.X, T1.W, T2.W,
+; CM-NEXT: MULLO_INT T1.Y (MASKED), T1.W, T2.W,
+; CM-NEXT: MULLO_INT T1.Z (MASKED), T1.W, T2.W,
+; CM-NEXT: MULLO_INT * T1.W (MASKED), T1.W, T2.W,
+; CM-NEXT: ADD_INT * T0.W, PV.X, T0.W,
+; CM-NEXT: LSHL * T2.W, PV.W, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: ADD_INT T0.X, T0.X, literal.x,
+; CM-NEXT: ASHR * T2.W, PV.W, literal.y,
+; CM-NEXT: -1(nan), 8(1.121039e-44)
+; CM-NEXT: MULLO_INT T1.X, T2.W, T1.W,
+; CM-NEXT: MULLO_INT T1.Y (MASKED), T2.W, T1.W,
+; CM-NEXT: MULLO_INT T1.Z (MASKED), T2.W, T1.W,
+; CM-NEXT: MULLO_INT * T1.W (MASKED), T2.W, T1.W,
+; CM-NEXT: ADD_INT T1.X, PV.X, T0.W,
+; CM-NEXT: SETE_INT T1.Z, T0.X, 0.0,
+; CM-NEXT: MOV * T0.W, T0.Y,
+; CM-NEXT: PRED_SETNE_INT * ExecMask,PredicateBit (MASKED), PV.Z, 0.0,
+; CM-NEXT: ALU clause starting at 58:
+; CM-NEXT: LSHR * T0.X, T0.Z, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
bb:
br label %bb6
@@ -119,3 +366,5 @@ bb6: ; preds = %bb6, %bb
}
declare i32 @llvm.amdgcn.mul.i24(i32, i32)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; R600: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/mad_uint24.ll b/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
index a6d458e..46b8df4 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
@@ -1,19 +1,75 @@
-; RUN: llc < %s -mtriple=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
-; RUN: llc < %s -mtriple=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG --check-prefix=FUNC
-; RUN: llc < %s -mtriple=amdgcn | FileCheck %s --check-prefix=SI --check-prefix=FUNC --check-prefix=GCN
-; RUN: llc < %s -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global | FileCheck %s --check-prefix=VI --check-prefix=FUNC --check-prefix=GCN --check-prefix=GCN2
-; RUN: llc < %s -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global | FileCheck %s --check-prefix=VI --check-prefix=FUNC --check-prefix=GCN --check-prefix=GCN2
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=r600 -mcpu=redwood | FileCheck %s --check-prefixes=EG
+; RUN: llc < %s -mtriple=r600 -mcpu=cayman | FileCheck %s --check-prefixes=CM
+; RUN: llc < %s -mtriple=amdgcn | FileCheck %s --check-prefixes=GCN
+; RUN: llc < %s -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global | FileCheck %s --check-prefixes=GFX8,SI
+; RUN: llc < %s -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global | FileCheck %s --check-prefixes=GFX8,VI
declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
-; FUNC-LABEL: {{^}}u32_mad24:
-; EG: MULLO_INT
-; SI: s_mul_i32
-; SI: s_add_i32
-; VI: s_mul_{{[iu]}}32
-; VI: s_add_{{[iu]}}32
-
define amdgpu_kernel void @u32_mad24(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
+; EG-LABEL: u32_mad24:
+; EG: ; %bb.0: ; %entry
+; EG-NEXT: ALU 6, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: ALU clause starting at 4:
+; EG-NEXT: AND_INT T0.W, KC0[2].W, literal.x,
+; EG-NEXT: AND_INT * T1.W, KC0[2].Z, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: MULLO_INT * T0.X, PS, PV.W,
+; EG-NEXT: ADD_INT T0.X, PS, KC0[3].X,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; CM-LABEL: u32_mad24:
+; CM: ; %bb.0: ; %entry
+; CM-NEXT: ALU 9, @4, KC0[CB0:0-32], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T1.X
+; CM-NEXT: CF_END
+; CM-NEXT: PAD
+; CM-NEXT: ALU clause starting at 4:
+; CM-NEXT: AND_INT T0.Z, KC0[2].W, literal.x,
+; CM-NEXT: AND_INT * T0.W, KC0[2].Z, literal.x,
+; CM-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X, T0.W, T0.Z,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.W, T0.Z,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.W, T0.Z,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.W, T0.Z,
+; CM-NEXT: ADD_INT * T0.X, PV.X, KC0[3].X,
+; CM-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; GCN-LABEL: u32_mad24:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0xb
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x9
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s0, s0, 0xffffff
+; GCN-NEXT: s_and_b32 s1, s1, 0xffffff
+; GCN-NEXT: s_mul_i32 s0, s0, s1
+; GCN-NEXT: s_add_i32 s0, s0, s2
+; GCN-NEXT: s_mov_b32 s6, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_endpgm
+;
+; GFX8-LABEL: u32_mad24:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x2c
+; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x24
+; GFX8-NEXT: s_mov_b32 s7, 0xf000
+; GFX8-NEXT: s_mov_b32 s6, -1
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_and_b32 s0, s0, 0xffffff
+; GFX8-NEXT: s_and_b32 s1, s1, 0xffffff
+; GFX8-NEXT: s_mul_i32 s0, s0, s1
+; GFX8-NEXT: s_add_i32 s0, s0, s2
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX8-NEXT: s_endpgm
entry:
%0 = shl i32 %a, 8
%a_24 = lshr i32 %0, 8
@@ -25,18 +81,88 @@ entry:
ret void
}
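The shl/lshr-by-8 pair in the IR above is just one spelling of a 24-bit zero-extension; the GCN and GFX8 checks show it folding to s_and_b32 with 0xffffff. An equivalent formulation, as a sketch with a hypothetical kernel name:

define amdgpu_kernel void @u32_mad24_masked(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
entry:
  ; and with 0xffffff clears the top 8 bits, exactly like (x << 8) >> 8
  ; with a logical shift right.
  %a_24 = and i32 %a, 16777215
  %b_24 = and i32 %b, 16777215
  %mul = mul i32 %a_24, %b_24
  %mad = add i32 %mul, %c
  store i32 %mad, ptr addrspace(1) %out
  ret void
}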
-; FUNC-LABEL: {{^}}i16_mad24:
; The order of A and B does not matter.
-; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
-; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended
-; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
-; EG: 16
-; GCN: s_mul_i32 [[MUL:s[0-9]]], {{[s][0-9], [s][0-9]}}
-; GCN: s_add_i32 [[MAD:s[0-9]]], [[MUL]], s{{[0-9]}}
-; GCN: s_sext_i32_i16 [[EXT:s[0-9]]], [[MAD]]
-; GCN: v_mov_b32_e32 v0, [[EXT]]
define amdgpu_kernel void @i16_mad24(ptr addrspace(1) %out, i16 %a, i16 %b, i16 %c) {
+; EG-LABEL: i16_mad24:
+; EG: ; %bb.0: ; %entry
+; EG-NEXT: ALU 0, @12, KC0[], KC1[]
+; EG-NEXT: TEX 2 @6
+; EG-NEXT: ALU 4, @13, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_16 T1.X, T0.X, 40, #3
+; EG-NEXT: VTX_READ_16 T2.X, T0.X, 42, #3
+; EG-NEXT: VTX_READ_16 T0.X, T0.X, 44, #3
+; EG-NEXT: ALU clause starting at 12:
+; EG-NEXT: MOV * T0.X, 0.0,
+; EG-NEXT: ALU clause starting at 13:
+; EG-NEXT: MULLO_INT * T0.Y, T1.X, T2.X,
+; EG-NEXT: ADD_INT * T0.W, PS, T0.X,
+; EG-NEXT: BFE_INT T0.X, PV.W, 0.0, literal.x,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
+; EG-NEXT: 16(2.242078e-44), 2(2.802597e-45)
+;
+; CM-LABEL: i16_mad24:
+; CM: ; %bb.0: ; %entry
+; CM-NEXT: ALU 0, @12, KC0[], KC1[]
+; CM-NEXT: TEX 2 @6
+; CM-NEXT: ALU 8, @13, KC0[CB0:0-32], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T1.X
+; CM-NEXT: CF_END
+; CM-NEXT: PAD
+; CM-NEXT: Fetch clause starting at 6:
+; CM-NEXT: VTX_READ_16 T1.X, T0.X, 40, #3
+; CM-NEXT: VTX_READ_16 T2.X, T0.X, 42, #3
+; CM-NEXT: VTX_READ_16 T0.X, T0.X, 44, #3
+; CM-NEXT: ALU clause starting at 12:
+; CM-NEXT: MOV * T0.X, 0.0,
+; CM-NEXT: ALU clause starting at 13:
+; CM-NEXT: MULLO_INT T0.X (MASKED), T1.X, T2.X,
+; CM-NEXT: MULLO_INT T0.Y, T1.X, T2.X,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T1.X, T2.X,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T1.X, T2.X,
+; CM-NEXT: ADD_INT * T0.W, PV.Y, T0.X,
+; CM-NEXT: BFE_INT * T0.X, PV.W, 0.0, literal.x,
+; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; CM-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; GCN-LABEL: i16_mad24:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GCN-NEXT: s_load_dword s4, s[4:5], 0xb
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_lshr_b32 s2, s2, 16
+; GCN-NEXT: s_mul_i32 s2, s4, s2
+; GCN-NEXT: s_add_i32 s2, s2, s3
+; GCN-NEXT: s_sext_i32_i16 s2, s2
+; GCN-NEXT: s_mov_b32 s6, -1
+; GCN-NEXT: s_mov_b32 s4, s0
+; GCN-NEXT: s_mov_b32 s5, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s2
+; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_endpgm
+;
+; GFX8-LABEL: i16_mad24:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_load_dword s8, s[4:5], 0x2c
+; GFX8-NEXT: s_mov_b32 s7, 0xf000
+; GFX8-NEXT: s_mov_b32 s6, -1
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 s4, s0
+; GFX8-NEXT: s_lshr_b32 s0, s2, 16
+; GFX8-NEXT: s_mul_i32 s0, s8, s0
+; GFX8-NEXT: s_add_i32 s0, s0, s3
+; GFX8-NEXT: s_sext_i32_i16 s0, s0
+; GFX8-NEXT: s_mov_b32 s5, s1
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX8-NEXT: s_endpgm
entry:
%0 = mul i16 %a, %b
%1 = add i16 %0, %c
@@ -46,17 +172,85 @@ entry:
}
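The i16 mad is legalized as a 32-bit multiply-add on the promoted operands, so the low 16 bits must be sign-extended before the i32 store; that is the BFE_INT ..., 0.0, 16 in the R600 output and the s_sext_i32_i16 in the GCN output above. A reduced sketch of the shape, with a hypothetical kernel name:

define amdgpu_kernel void @i16_mad24_sketch(ptr addrspace(1) %out, i16 %a, i16 %b, i16 %c) {
entry:
  %mul = mul i16 %a, %b
  %mad = add i16 %mul, %c
  ; The i16 result is sign-extended to fill the 32-bit destination.
  %ext = sext i16 %mad to i32
  store i32 %ext, ptr addrspace(1) %out
  ret void
}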
; FIXME: Need to handle non-uniform case for function below (load without gep).
-; FUNC-LABEL: {{^}}i8_mad24:
-; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
-; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended
-; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
-; EG: 8
-; GCN: s_mul_i32 [[MUL:s[0-9]]], {{[s][0-9], [s][0-9]}}
-; GCN: s_add_i32 [[MAD:s[0-9]]], [[MUL]], s{{[0-9]}}
-; GCN: s_sext_i32_i8 [[EXT:s[0-9]]], [[MAD]]
-; GCN: v_mov_b32_e32 v0, [[EXT]]
define amdgpu_kernel void @i8_mad24(ptr addrspace(1) %out, i8 %a, i8 %b, i8 %c) {
+; EG-LABEL: i8_mad24:
+; EG: ; %bb.0: ; %entry
+; EG-NEXT: ALU 0, @12, KC0[], KC1[]
+; EG-NEXT: TEX 2 @6
+; EG-NEXT: ALU 4, @13, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_8 T1.X, T0.X, 40, #3
+; EG-NEXT: VTX_READ_8 T2.X, T0.X, 41, #3
+; EG-NEXT: VTX_READ_8 T0.X, T0.X, 42, #3
+; EG-NEXT: ALU clause starting at 12:
+; EG-NEXT: MOV * T0.X, 0.0,
+; EG-NEXT: ALU clause starting at 13:
+; EG-NEXT: MULLO_INT * T0.Y, T1.X, T2.X,
+; EG-NEXT: ADD_INT * T0.W, PS, T0.X,
+; EG-NEXT: BFE_INT T0.X, PV.W, 0.0, literal.x,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
+; EG-NEXT: 8(1.121039e-44), 2(2.802597e-45)
+;
+; CM-LABEL: i8_mad24:
+; CM: ; %bb.0: ; %entry
+; CM-NEXT: ALU 0, @12, KC0[], KC1[]
+; CM-NEXT: TEX 2 @6
+; CM-NEXT: ALU 8, @13, KC0[CB0:0-32], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T1.X
+; CM-NEXT: CF_END
+; CM-NEXT: PAD
+; CM-NEXT: Fetch clause starting at 6:
+; CM-NEXT: VTX_READ_8 T1.X, T0.X, 40, #3
+; CM-NEXT: VTX_READ_8 T2.X, T0.X, 41, #3
+; CM-NEXT: VTX_READ_8 T0.X, T0.X, 42, #3
+; CM-NEXT: ALU clause starting at 12:
+; CM-NEXT: MOV * T0.X, 0.0,
+; CM-NEXT: ALU clause starting at 13:
+; CM-NEXT: MULLO_INT T0.X (MASKED), T1.X, T2.X,
+; CM-NEXT: MULLO_INT T0.Y, T1.X, T2.X,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T1.X, T2.X,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T1.X, T2.X,
+; CM-NEXT: ADD_INT * T0.W, PV.Y, T0.X,
+; CM-NEXT: BFE_INT * T0.X, PV.W, 0.0, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; GCN-LABEL: i8_mad24:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_dword s2, s[4:5], 0xb
+; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_lshr_b32 s4, s2, 8
+; GCN-NEXT: s_lshr_b32 s5, s2, 16
+; GCN-NEXT: s_mul_i32 s2, s2, s4
+; GCN-NEXT: s_add_i32 s2, s2, s5
+; GCN-NEXT: s_sext_i32_i8 s4, s2
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GCN-NEXT: s_endpgm
+;
+; GFX8-LABEL: i8_mad24:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dword s6, s[4:5], 0x2c
+; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8-NEXT: s_mov_b32 s3, 0xf000
+; GFX8-NEXT: s_mov_b32 s2, -1
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_lshr_b32 s4, s6, 8
+; GFX8-NEXT: s_lshr_b32 s5, s6, 16
+; GFX8-NEXT: s_mul_i32 s4, s6, s4
+; GFX8-NEXT: s_add_i32 s4, s4, s5
+; GFX8-NEXT: s_sext_i32_i8 s4, s4
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX8-NEXT: s_endpgm
entry:
%0 = mul i8 %a, %b
%1 = add i8 %0, %c
@@ -72,11 +266,75 @@ entry:
; 24-bit mad pattern wasn't being matched.
; Check that the select instruction is not deleted.
-; FUNC-LABEL: {{^}}i24_i32_i32_mad:
-; EG: CNDE_INT
-; SI: s_cselect
-; GCN2: s_cselect
define amdgpu_kernel void @i24_i32_i32_mad(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c, i32 %d) {
+; EG-LABEL: i24_i32_i32_mad:
+; EG: ; %bb.0: ; %entry
+; EG-NEXT: ALU 7, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: ALU clause starting at 4:
+; EG-NEXT: ASHR * T0.W, KC0[2].Z, literal.x,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT * T0.W, KC0[3].X, literal.x, PV.W,
+; EG-NEXT: 34(4.764415e-44), 0(0.000000e+00)
+; EG-NEXT: MULLO_INT * T0.X, PV.W, KC0[3].X,
+; EG-NEXT: ADD_INT T0.X, PS, KC0[3].Y,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; CM-LABEL: i24_i32_i32_mad:
+; CM: ; %bb.0: ; %entry
+; CM-NEXT: ALU 10, @4, KC0[CB0:0-32], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T1.X
+; CM-NEXT: CF_END
+; CM-NEXT: PAD
+; CM-NEXT: ALU clause starting at 4:
+; CM-NEXT: ASHR * T0.W, KC0[2].Z, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: CNDE_INT * T0.W, KC0[3].X, literal.x, PV.W,
+; CM-NEXT: 34(4.764415e-44), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X, T0.W, KC0[3].X,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.W, KC0[3].X,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.W, KC0[3].X,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.W, KC0[3].X,
+; CM-NEXT: ADD_INT * T0.X, PV.X, KC0[3].Y,
+; CM-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; GCN-LABEL: i24_i32_i32_mad:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_dword s2, s[4:5], 0xb
+; GCN-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
+; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_ashr_i32 s2, s2, 8
+; GCN-NEXT: s_cmp_lg_u32 s6, 0
+; GCN-NEXT: s_cselect_b32 s2, s2, 34
+; GCN-NEXT: s_mul_i32 s2, s2, s6
+; GCN-NEXT: s_add_i32 s4, s2, s7
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GCN-NEXT: s_endpgm
+;
+; GFX8-LABEL: i24_i32_i32_mad:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dword s8, s[4:5], 0x2c
+; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8-NEXT: s_mov_b32 s3, 0xf000
+; GFX8-NEXT: s_mov_b32 s2, -1
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_ashr_i32 s4, s8, 8
+; GFX8-NEXT: s_cmp_lg_u32 s6, 0
+; GFX8-NEXT: s_cselect_b32 s4, s4, 34
+; GFX8-NEXT: s_mul_i32 s4, s4, s6
+; GFX8-NEXT: s_add_i32 s4, s4, s7
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX8-NEXT: s_endpgm
entry:
%0 = ashr i32 %a, 8
%1 = icmp ne i32 %c, 0
@@ -87,13 +345,139 @@ entry:
ret void
}
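The shape this test guards, reduced to a sketch (hypothetical function, mirroring the entry block above): both select arms are valid 24-bit values, an ashr-by-8 result and the constant 34, so the 24-bit multiply may fire, but the select must survive as the CNDE_INT and s_cselect in the checks confirm.

define i32 @select_into_mad_sketch(i32 %a, i32 %c, i32 %d) {
  %a24 = ashr i32 %a, 8                  ; sign-extended 24-bit value
  %nz = icmp ne i32 %c, 0
  %sel = select i1 %nz, i32 %a24, i32 34 ; must not be deleted
  %mul = mul i32 %sel, %c                ; 24-bit mad candidate
  %mad = add i32 %mul, %d
  ret i32 %mad
}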
-; FUNC-LABEL: {{^}}extra_and:
-; SI-NOT: v_and
-; SI: s_mul_i32
-; SI: s_mul_i32
-; SI: s_add_i32
-; SI: s_add_i32
define amdgpu_kernel void @extra_and(ptr addrspace(1) %arg, i32 %arg2, i32 %arg3) {
+; EG-LABEL: extra_and:
+; EG: ; %bb.0: ; %bb
+; EG-NEXT: ALU 5, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: LOOP_START_DX10 @7
+; EG-NEXT: ALU_PUSH_BEFORE 12, @16, KC0[], KC1[]
+; EG-NEXT: JUMP @6 POP:1
+; EG-NEXT: LOOP_BREAK @6
+; EG-NEXT: POP @6 POP:1
+; EG-NEXT: END_LOOP @2
+; EG-NEXT: ALU 1, @29, KC0[], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T1.W, literal.x,
+; EG-NEXT: 0(0.000000e+00), 0(0.000000e+00)
+; EG-NEXT: MOV * T3.W, PV.W,
+; EG-NEXT: MOV T0.Z, KC0[2].Y,
+; EG-NEXT: MOV T0.W, KC0[2].Z,
+; EG-NEXT: MOV * T2.W, KC0[2].W,
+; EG-NEXT: ALU clause starting at 16:
+; EG-NEXT: AND_INT T1.W, T1.W, literal.x,
+; EG-NEXT: AND_INT * T4.W, T3.W, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: AND_INT T3.W, T3.W, literal.x,
+; EG-NEXT: MULLO_INT * T0.X, PS, PV.W,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: MULLO_INT * T0.Y, PV.W, T1.W,
+; EG-NEXT: ADD_INT T3.W, T2.W, PS,
+; EG-NEXT: ADD_INT * T1.W, T0.W, T0.X,
+; EG-NEXT: ADD_INT * T0.X, PS, PV.W,
+; EG-NEXT: SETNE_INT * T4.W, PV.X, literal.x,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: PRED_SETE_INT * ExecMask,PredicateBit (MASKED), PV.W, 0.0,
+; EG-NEXT: ALU clause starting at 29:
+; EG-NEXT: LSHR * T1.X, T0.Z, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; CM-LABEL: extra_and:
+; CM: ; %bb.0: ; %bb
+; CM-NEXT: ALU 5, @10, KC0[CB0:0-32], KC1[]
+; CM-NEXT: LOOP_START_DX10 @7
+; CM-NEXT: ALU_PUSH_BEFORE 17, @16, KC0[], KC1[]
+; CM-NEXT: JUMP @6 POP:1
+; CM-NEXT: LOOP_BREAK @6
+; CM-NEXT: POP @6 POP:1
+; CM-NEXT: END_LOOP @2
+; CM-NEXT: ALU 1, @34, KC0[], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T1.X
+; CM-NEXT: CF_END
+; CM-NEXT: ALU clause starting at 10:
+; CM-NEXT: MOV * T0.W, literal.x,
+; CM-NEXT: 0(0.000000e+00), 0(0.000000e+00)
+; CM-NEXT: MOV * T1.Z, PV.W,
+; CM-NEXT: MOV T0.Y, KC0[2].Y,
+; CM-NEXT: MOV T0.Z, KC0[2].Z,
+; CM-NEXT: MOV * T1.W, KC0[2].W,
+; CM-NEXT: ALU clause starting at 16:
+; CM-NEXT: AND_INT T1.Y, T1.Z, literal.x,
+; CM-NEXT: AND_INT T2.Z, T0.W, literal.x,
+; CM-NEXT: AND_INT * T0.W, T1.Z, literal.x,
+; CM-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X, T0.W, T2.Z,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.W, T2.Z,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.W, T2.Z,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.W, T2.Z,
+; CM-NEXT: MULLO_INT T0.X (MASKED), T1.Y, T2.Z,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T1.Y, T2.Z,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T1.Y, T2.Z,
+; CM-NEXT: MULLO_INT * T0.W, T1.Y, T2.Z,
+; CM-NEXT: ADD_INT T1.Z, T1.W, PV.W,
+; CM-NEXT: ADD_INT * T0.W, T0.Z, T0.X,
+; CM-NEXT: ADD_INT * T0.X, PV.W, PV.Z,
+; CM-NEXT: SETNE_INT * T2.W, PV.X, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: PRED_SETE_INT * ExecMask,PredicateBit (MASKED), PV.W, 0.0,
+; CM-NEXT: ALU clause starting at 34:
+; CM-NEXT: LSHR * T1.X, T0.Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; GCN-LABEL: extra_and:
+; GCN: ; %bb.0: ; %bb
+; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xb
+; GCN-NEXT: s_mov_b32 s2, 0
+; GCN-NEXT: s_mov_b32 s6, 0
+; GCN-NEXT: .LBB4_1: ; %bb4
+; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN-NEXT: s_and_b32 s3, s6, 0xffffff
+; GCN-NEXT: s_and_b32 s6, s6, 0xffffff
+; GCN-NEXT: s_and_b32 s2, s2, 0xffffff
+; GCN-NEXT: s_mul_i32 s3, s3, s2
+; GCN-NEXT: s_mul_i32 s6, s6, s2
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_add_i32 s2, s0, s3
+; GCN-NEXT: s_add_i32 s6, s1, s6
+; GCN-NEXT: s_add_i32 s3, s2, s6
+; GCN-NEXT: s_cmp_lg_u32 s3, 8
+; GCN-NEXT: s_cbranch_scc1 .LBB4_1
+; GCN-NEXT: ; %bb.2: ; %bb18
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x9
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_mov_b32 s6, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s3
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_endpgm
+;
+; GFX8-LABEL: extra_and:
+; GFX8: ; %bb.0: ; %bb
+; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2c
+; GFX8-NEXT: s_mov_b32 s2, 0
+; GFX8-NEXT: s_mov_b32 s6, 0
+; GFX8-NEXT: .LBB4_1: ; %bb4
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_and_b32 s3, s6, 0xffffff
+; GFX8-NEXT: s_and_b32 s6, s6, 0xffffff
+; GFX8-NEXT: s_and_b32 s2, s2, 0xffffff
+; GFX8-NEXT: s_mul_i32 s3, s3, s2
+; GFX8-NEXT: s_mul_i32 s6, s6, s2
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_add_i32 s2, s0, s3
+; GFX8-NEXT: s_add_i32 s6, s1, s6
+; GFX8-NEXT: s_add_i32 s3, s2, s6
+; GFX8-NEXT: s_cmp_lg_u32 s3, 8
+; GFX8-NEXT: s_cbranch_scc1 .LBB4_1
+; GFX8-NEXT: ; %bb.2: ; %bb18
+; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x24
+; GFX8-NEXT: s_mov_b32 s7, 0xf000
+; GFX8-NEXT: s_mov_b32 s6, -1
+; GFX8-NEXT: v_mov_b32_e32 v0, s3
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX8-NEXT: s_endpgm
bb:
br label %bb4
@@ -119,13 +503,139 @@ bb18: ; preds = %bb4
ret void
}
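The point of extra_and is that re-masking an already-masked value is a no-op that known-bits analysis can drop without blocking the 24-bit match; the old SI-NOT: v_and assertion guarded against the masks surviving as VALU instructions. A reduced sketch (hypothetical function, not the test body):

define i32 @redundant_mask_sketch(i32 %x, i32 %y) {
  ; %x24b re-masks a value whose top 8 bits are already known zero,
  ; so it should fold away while the multiply still qualifies for
  ; the 24-bit path.
  %x24 = and i32 %x, 16777215
  %x24b = and i32 %x24, 16777215
  %y24 = and i32 %y, 16777215
  %mul = mul i32 %x24b, %y24
  ret i32 %mul
}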
-; FUNC-LABEL: {{^}}dont_remove_shift
-; SI: s_lshr
-; SI: s_mul_i32
-; SI: s_mul_i32
-; SI: s_add_i32
-; SI: s_add_i32
define amdgpu_kernel void @dont_remove_shift(ptr addrspace(1) %arg, i32 %arg2, i32 %arg3) {
+; EG-LABEL: dont_remove_shift:
+; EG: ; %bb.0: ; %bb
+; EG-NEXT: ALU 5, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: LOOP_START_DX10 @7
+; EG-NEXT: ALU_PUSH_BEFORE 12, @16, KC0[], KC1[]
+; EG-NEXT: JUMP @6 POP:1
+; EG-NEXT: LOOP_BREAK @6
+; EG-NEXT: POP @6 POP:1
+; EG-NEXT: END_LOOP @2
+; EG-NEXT: ALU 1, @29, KC0[], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T1.W, literal.x,
+; EG-NEXT: 0(0.000000e+00), 0(0.000000e+00)
+; EG-NEXT: MOV * T3.W, PV.W,
+; EG-NEXT: MOV T0.Z, KC0[2].Y,
+; EG-NEXT: MOV T0.W, KC0[2].Z,
+; EG-NEXT: MOV * T2.W, KC0[2].W,
+; EG-NEXT: ALU clause starting at 16:
+; EG-NEXT: LSHR T1.W, T1.W, literal.x,
+; EG-NEXT: LSHR * T4.W, T3.W, literal.x,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: LSHR T3.W, T3.W, literal.x,
+; EG-NEXT: MULLO_INT * T0.X, PS, PV.W,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: MULLO_INT * T0.Y, PV.W, T1.W,
+; EG-NEXT: ADD_INT T3.W, T2.W, PS,
+; EG-NEXT: ADD_INT * T1.W, T0.W, T0.X,
+; EG-NEXT: ADD_INT * T0.X, PS, PV.W,
+; EG-NEXT: SETNE_INT * T4.W, PV.X, literal.x,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: PRED_SETE_INT * ExecMask,PredicateBit (MASKED), PV.W, 0.0,
+; EG-NEXT: ALU clause starting at 29:
+; EG-NEXT: LSHR * T1.X, T0.Z, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; CM-LABEL: dont_remove_shift:
+; CM: ; %bb.0: ; %bb
+; CM-NEXT: ALU 5, @10, KC0[CB0:0-32], KC1[]
+; CM-NEXT: LOOP_START_DX10 @7
+; CM-NEXT: ALU_PUSH_BEFORE 17, @16, KC0[], KC1[]
+; CM-NEXT: JUMP @6 POP:1
+; CM-NEXT: LOOP_BREAK @6
+; CM-NEXT: POP @6 POP:1
+; CM-NEXT: END_LOOP @2
+; CM-NEXT: ALU 1, @34, KC0[], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T1.X
+; CM-NEXT: CF_END
+; CM-NEXT: ALU clause starting at 10:
+; CM-NEXT: MOV * T0.W, literal.x,
+; CM-NEXT: 0(0.000000e+00), 0(0.000000e+00)
+; CM-NEXT: MOV * T1.Z, PV.W,
+; CM-NEXT: MOV T0.Y, KC0[2].Y,
+; CM-NEXT: MOV T0.Z, KC0[2].Z,
+; CM-NEXT: MOV * T1.W, KC0[2].W,
+; CM-NEXT: ALU clause starting at 16:
+; CM-NEXT: LSHR T1.Y, T1.Z, literal.x,
+; CM-NEXT: LSHR T2.Z, T0.W, literal.x,
+; CM-NEXT: LSHR * T0.W, T1.Z, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X, T0.W, T2.Z,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.W, T2.Z,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.W, T2.Z,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.W, T2.Z,
+; CM-NEXT: MULLO_INT T0.X (MASKED), T1.Y, T2.Z,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T1.Y, T2.Z,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T1.Y, T2.Z,
+; CM-NEXT: MULLO_INT * T0.W, T1.Y, T2.Z,
+; CM-NEXT: ADD_INT T1.Z, T1.W, PV.W,
+; CM-NEXT: ADD_INT * T0.W, T0.Z, T0.X,
+; CM-NEXT: ADD_INT * T0.X, PV.W, PV.Z,
+; CM-NEXT: SETNE_INT * T2.W, PV.X, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: PRED_SETE_INT * ExecMask,PredicateBit (MASKED), PV.W, 0.0,
+; CM-NEXT: ALU clause starting at 34:
+; CM-NEXT: LSHR * T1.X, T0.Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; GCN-LABEL: dont_remove_shift:
+; GCN: ; %bb.0: ; %bb
+; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xb
+; GCN-NEXT: s_mov_b32 s2, 0
+; GCN-NEXT: s_mov_b32 s6, 0
+; GCN-NEXT: .LBB5_1: ; %bb4
+; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN-NEXT: s_lshr_b32 s3, s6, 8
+; GCN-NEXT: s_lshr_b32 s6, s6, 8
+; GCN-NEXT: s_lshr_b32 s2, s2, 8
+; GCN-NEXT: s_mul_i32 s3, s3, s2
+; GCN-NEXT: s_mul_i32 s6, s6, s2
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_add_i32 s2, s0, s3
+; GCN-NEXT: s_add_i32 s6, s1, s6
+; GCN-NEXT: s_add_i32 s3, s2, s6
+; GCN-NEXT: s_cmp_lg_u32 s3, 8
+; GCN-NEXT: s_cbranch_scc1 .LBB5_1
+; GCN-NEXT: ; %bb.2: ; %bb18
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x9
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_mov_b32 s6, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s3
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_endpgm
+;
+; GFX8-LABEL: dont_remove_shift:
+; GFX8: ; %bb.0: ; %bb
+; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2c
+; GFX8-NEXT: s_mov_b32 s2, 0
+; GFX8-NEXT: s_mov_b32 s6, 0
+; GFX8-NEXT: .LBB5_1: ; %bb4
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_lshr_b32 s3, s6, 8
+; GFX8-NEXT: s_lshr_b32 s6, s6, 8
+; GFX8-NEXT: s_lshr_b32 s2, s2, 8
+; GFX8-NEXT: s_mul_i32 s3, s3, s2
+; GFX8-NEXT: s_mul_i32 s6, s6, s2
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_add_i32 s2, s0, s3
+; GFX8-NEXT: s_add_i32 s6, s1, s6
+; GFX8-NEXT: s_add_i32 s3, s2, s6
+; GFX8-NEXT: s_cmp_lg_u32 s3, 8
+; GFX8-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX8-NEXT: ; %bb.2: ; %bb18
+; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x24
+; GFX8-NEXT: s_mov_b32 s7, 0xf000
+; GFX8-NEXT: s_mov_b32 s6, -1
+; GFX8-NEXT: v_mov_b32_e32 v0, s3
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX8-NEXT: s_endpgm
bb:
br label %bb4
@@ -151,19 +661,234 @@ bb18: ; preds = %bb4
ret void
}
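Unlike the AND masks in extra_and, the lshr-by-8 operations here change the value, so they must be emitted even though their results are known 24-bit quantities; that is what the old SI: s_lshr assertion pinned down, and the s_lshr_b32 instructions in the loop above still show it. A reduced sketch (hypothetical function):

define i32 @shift_then_mul24_sketch(i32 %x, i32 %y) {
  ; lshr leaves the top 8 bits zero, so the multiply qualifies for
  ; the 24-bit path, but the shifts themselves must not be deleted.
  %x24 = lshr i32 %x, 8
  %y24 = lshr i32 %y, 8
  %mul = mul i32 %x24, %y24
  ret i32 %mul
}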
-; FUNC-LABEL: {{^}}i8_mad_sat_16:
-; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
-; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
-; The result must be sign-extended
-; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
-; EG: 8
-; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; SI: v_bfe_i32 [[EXT:v[0-9]]], [[MAD]], 0, 16
-; SI: v_med3_i32 v{{[0-9]}}, [[EXT]],
-; VI: v_mad_u16 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; VI: v_max_i16_e32 [[MAX:v[0-9]]], 0xff80, [[MAD]]
-; VI: v_min_i16_e32 {{v[0-9]}}, 0x7f, [[MAX]]
define amdgpu_kernel void @i8_mad_sat_16(ptr addrspace(1) %out, ptr addrspace(1) %in0, ptr addrspace(1) %in1, ptr addrspace(1) %in2, ptr addrspace(5) %idx) {
+; EG-LABEL: i8_mad_sat_16:
+; EG: ; %bb.0: ; %entry
+; EG-NEXT: ALU 4, @14, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @8
+; EG-NEXT: ALU 1, @19, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @10
+; EG-NEXT: ALU 24, @21, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 8:
+; EG-NEXT: VTX_READ_8 T1.X, T1.X, 0, #1
+; EG-NEXT: Fetch clause starting at 10:
+; EG-NEXT: VTX_READ_8 T3.X, T3.X, 0, #1
+; EG-NEXT: VTX_READ_8 T2.X, T2.X, 0, #1
+; EG-NEXT: ALU clause starting at 14:
+; EG-NEXT: LSHR * T0.W, KC0[3].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; EG-NEXT: MOVA_INT * AR.x (MASKED), PV.W,
+; EG-NEXT: MOV * T0.X, T(0 + AR.x).X+,
+; EG-NEXT: ADD_INT * T1.X, KC0[2].W, PV.X,
+; EG-NEXT: ALU clause starting at 19:
+; EG-NEXT: ADD_INT T2.X, KC0[2].Z, T0.X,
+; EG-NEXT: ADD_INT * T3.X, KC0[3].X, T0.X,
+; EG-NEXT: ALU clause starting at 21:
+; EG-NEXT: BFE_INT T0.Z, T1.X, 0.0, literal.x,
+; EG-NEXT: BFE_INT * T0.W, T2.X, 0.0, literal.x, BS:VEC_120/SCL_212
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_INT T1.W, T3.X, 0.0, literal.x,
+; EG-NEXT: MULLO_INT * T0.Y, PV.Z, PV.W,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
+; EG-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x,
+; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; EG-NEXT: MAX_INT T0.W, PV.W, literal.x,
+; EG-NEXT: ADD_INT * T1.W, KC0[2].Y, T0.X,
+; EG-NEXT: -128(nan), 0(0.000000e+00)
+; EG-NEXT: AND_INT T2.W, PS, literal.x,
+; EG-NEXT: MIN_INT * T0.W, PV.W, literal.y,
+; EG-NEXT: 3(4.203895e-45), 127(1.779649e-43)
+; EG-NEXT: AND_INT T0.W, PS, literal.x,
+; EG-NEXT: LSHL * T2.W, PV.W, literal.y,
+; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, T1.W, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; CM-LABEL: i8_mad_sat_16:
+; CM: ; %bb.0: ; %entry
+; CM-NEXT: ALU 4, @14, KC0[CB0:0-32], KC1[]
+; CM-NEXT: TEX 0 @8
+; CM-NEXT: ALU 1, @19, KC0[CB0:0-32], KC1[]
+; CM-NEXT: TEX 1 @10
+; CM-NEXT: ALU 26, @21, KC0[CB0:0-32], KC1[]
+; CM-NEXT: MEM_RAT MSKOR T1.XW, T0.X
+; CM-NEXT: CF_END
+; CM-NEXT: PAD
+; CM-NEXT: Fetch clause starting at 8:
+; CM-NEXT: VTX_READ_8 T1.X, T1.X, 0, #1
+; CM-NEXT: Fetch clause starting at 10:
+; CM-NEXT: VTX_READ_8 T3.X, T3.X, 0, #1
+; CM-NEXT: VTX_READ_8 T2.X, T2.X, 0, #1
+; CM-NEXT: ALU clause starting at 14:
+; CM-NEXT: LSHR * T0.W, KC0[3].Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; CM-NEXT: MOVA_INT * AR.x (MASKED), PV.W,
+; CM-NEXT: MOV * T0.X, T(0 + AR.x).X+,
+; CM-NEXT: ADD_INT * T1.X, KC0[3].X, PV.X,
+; CM-NEXT: ALU clause starting at 19:
+; CM-NEXT: ADD_INT * T2.X, KC0[2].W, T0.X,
+; CM-NEXT: ADD_INT * T3.X, KC0[2].Z, T0.X,
+; CM-NEXT: ALU clause starting at 21:
+; CM-NEXT: BFE_INT T0.Y, T1.X, 0.0, literal.x,
+; CM-NEXT: BFE_INT T0.Z, T2.X, 0.0, literal.x, BS:VEC_120/SCL_212
+; CM-NEXT: BFE_INT * T0.W, T3.X, 0.0, literal.x, BS:VEC_201
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X (MASKED), T0.Z, T0.W,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.Z, T0.W,
+; CM-NEXT: MULLO_INT T0.Z, T0.Z, T0.W,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.Z, T0.W,
+; CM-NEXT: ADD_INT * T0.W, PV.Z, T0.Y,
+; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x,
+; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; CM-NEXT: MAX_INT T0.Z, PV.W, literal.x,
+; CM-NEXT: ADD_INT * T0.W, KC0[2].Y, T0.X,
+; CM-NEXT: -128(nan), 0(0.000000e+00)
+; CM-NEXT: AND_INT T1.Z, PV.W, literal.x,
+; CM-NEXT: MIN_INT * T1.W, PV.Z, literal.y,
+; CM-NEXT: 3(4.203895e-45), 127(1.779649e-43)
+; CM-NEXT: AND_INT T0.Z, PV.W, literal.x,
+; CM-NEXT: LSHL * T1.W, PV.Z, literal.y,
+; CM-NEXT: 255(3.573311e-43), 3(4.203895e-45)
+; CM-NEXT: LSHL T1.X, PV.Z, PV.W,
+; CM-NEXT: LSHL * T1.W, literal.x, PV.W,
+; CM-NEXT: 255(3.573311e-43), 0(0.000000e+00)
+; CM-NEXT: MOV T1.Y, 0.0,
+; CM-NEXT: MOV * T1.Z, 0.0,
+; CM-NEXT: LSHR * T0.X, T0.W, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; GCN-LABEL: i8_mad_sat_16:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_mov_b32 s20, SCRATCH_RSRC_DWORD0
+; GCN-NEXT: s_mov_b32 s21, SCRATCH_RSRC_DWORD1
+; GCN-NEXT: s_mov_b32 s22, -1
+; GCN-NEXT: s_mov_b32 s23, 0xe8f000
+; GCN-NEXT: s_add_u32 s20, s20, s11
+; GCN-NEXT: s_addc_u32 s21, s21, 0
+; GCN-NEXT: s_load_dword s8, s[4:5], 0x11
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_add_i32 s9, s8, 4
+; GCN-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; GCN-NEXT: v_mov_b32_e32 v0, s8
+; GCN-NEXT: v_mov_b32_e32 v1, s9
+; GCN-NEXT: buffer_load_dword v1, v1, s[20:23], 0 offen
+; GCN-NEXT: buffer_load_dword v0, v0, s[20:23], 0 offen
+; GCN-NEXT: s_mov_b32 s11, 0xf000
+; GCN-NEXT: s_mov_b32 s10, 0
+; GCN-NEXT: s_mov_b64 s[14:15], s[10:11]
+; GCN-NEXT: s_mov_b64 s[18:19], s[10:11]
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_mov_b64 s[8:9], s[2:3]
+; GCN-NEXT: s_mov_b64 s[12:13], s[4:5]
+; GCN-NEXT: s_mov_b64 s[16:17], s[6:7]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_sbyte v2, v[0:1], s[12:15], 0 addr64
+; GCN-NEXT: buffer_load_sbyte v3, v[0:1], s[8:11], 0 addr64
+; GCN-NEXT: buffer_load_sbyte v4, v[0:1], s[16:19], 0 addr64
+; GCN-NEXT: s_movk_i32 s2, 0xff80
+; GCN-NEXT: s_waitcnt vmcnt(2)
+; GCN-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mad_u32_u24 v2, v2, v3, v4
+; GCN-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GCN-NEXT: v_mov_b32_e32 v3, 0x7f
+; GCN-NEXT: v_med3_i32 v2, v2, s2, v3
+; GCN-NEXT: s_mov_b64 s[2:3], s[10:11]
+; GCN-NEXT: buffer_store_byte v2, v[0:1], s[0:3], 0 addr64
+; GCN-NEXT: s_endpgm
+;
+; SI-LABEL: i8_mad_sat_16:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_mov_b32 s88, SCRATCH_RSRC_DWORD0
+; SI-NEXT: s_load_dword s0, s[4:5], 0x44
+; SI-NEXT: s_mov_b32 s89, SCRATCH_RSRC_DWORD1
+; SI-NEXT: s_mov_b32 s90, -1
+; SI-NEXT: s_mov_b32 s91, 0xe80000
+; SI-NEXT: s_add_u32 s88, s88, s11
+; SI-NEXT: s_addc_u32 s89, s89, 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_add_i32 s1, s0, 4
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: buffer_load_dword v6, v0, s[88:91], 0 offen
+; SI-NEXT: v_mov_b32_e32 v0, s1
+; SI-NEXT: buffer_load_dword v7, v0, s[88:91], 0 offen
+; SI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_mov_b32_e32 v5, s7
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_add_u32_e32 v0, vcc, s2, v6
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_addc_u32_e32 v1, vcc, v1, v7, vcc
+; SI-NEXT: v_add_u32_e32 v2, vcc, s4, v6
+; SI-NEXT: v_addc_u32_e32 v3, vcc, v3, v7, vcc
+; SI-NEXT: v_add_u32_e32 v4, vcc, s6, v6
+; SI-NEXT: v_addc_u32_e32 v5, vcc, v5, v7, vcc
+; SI-NEXT: flat_load_sbyte v0, v[0:1]
+; SI-NEXT: flat_load_sbyte v1, v[2:3]
+; SI-NEXT: flat_load_sbyte v2, v[4:5]
+; SI-NEXT: v_mov_b32_e32 v3, s1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mad_u16 v0, v1, v0, v2
+; SI-NEXT: v_max_i16_e32 v0, 0xff80, v0
+; SI-NEXT: v_min_i16_e32 v2, 0x7f, v0
+; SI-NEXT: v_add_u32_e32 v0, vcc, s0, v6
+; SI-NEXT: v_addc_u32_e32 v1, vcc, v3, v7, vcc
+; SI-NEXT: flat_store_byte v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: i8_mad_sat_16:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; VI-NEXT: s_load_dword s0, s[4:5], 0x44
+; VI-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; VI-NEXT: s_mov_b32 s14, -1
+; VI-NEXT: s_mov_b32 s15, 0xe80000
+; VI-NEXT: s_add_u32 s12, s12, s11
+; VI-NEXT: s_addc_u32 s13, s13, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_add_i32 s1, s0, 4
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: buffer_load_dword v6, v0, s[12:15], 0 offen
+; VI-NEXT: v_mov_b32_e32 v0, s1
+; VI-NEXT: buffer_load_dword v7, v0, s[12:15], 0 offen
+; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_mov_b32_e32 v5, s7
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v6
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_addc_u32_e32 v1, vcc, v1, v7, vcc
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v6
+; VI-NEXT: v_addc_u32_e32 v3, vcc, v3, v7, vcc
+; VI-NEXT: v_add_u32_e32 v4, vcc, s6, v6
+; VI-NEXT: v_addc_u32_e32 v5, vcc, v5, v7, vcc
+; VI-NEXT: flat_load_sbyte v0, v[0:1]
+; VI-NEXT: flat_load_sbyte v1, v[2:3]
+; VI-NEXT: flat_load_sbyte v2, v[4:5]
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mad_u16 v0, v1, v0, v2
+; VI-NEXT: v_max_i16_e32 v0, 0xff80, v0
+; VI-NEXT: v_min_i16_e32 v2, 0x7f, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v6
+; VI-NEXT: v_addc_u32_e32 v1, vcc, v3, v7, vcc
+; VI-NEXT: flat_store_byte v[0:1], v2
+; VI-NEXT: s_endpgm
entry:
%retval.0.i = load i64, ptr addrspace(5) %idx
%arrayidx = getelementptr inbounds i8, ptr addrspace(1) %in0, i64 %retval.0.i
@@ -187,16 +912,201 @@ entry:
ret void
}
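The saturation in i8_mad_sat_16 clamps a 16-bit mad result to the signed 8-bit range [-128, 127]: a max against 0xff80 followed by a min against 0x7f, which SI folds into v_med3_i32 and VI keeps as v_max_i16/v_min_i16. One way to express the clamp, as a sketch using the min/max intrinsics rather than the icmp/select form a test would typically spell out:

define i16 @sat_to_i8_range_sketch(i16 %x) {
  ; Clamp to [-128, 127]: raise the floor first, then cap the ceiling.
  %lo = call i16 @llvm.smax.i16(i16 %x, i16 -128)
  %clamped = call i16 @llvm.smin.i16(i16 %lo, i16 127)
  ret i16 %clamped
}
declare i16 @llvm.smax.i16(i16, i16)
declare i16 @llvm.smin.i16(i16, i16)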
-; FUNC-LABEL: {{^}}i8_mad_32:
-; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
-; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
-; The result must be sign-extended
-; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
-; EG: 8
-; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; VI: v_mad_u16 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; GCN: v_bfe_i32 [[EXT:v[0-9]]], [[MAD]], 0, 16
define amdgpu_kernel void @i8_mad_32(ptr addrspace(1) %out, ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(5) %idx) {
+; EG-LABEL: i8_mad_32:
+; EG: ; %bb.0: ; %entry
+; EG-NEXT: ALU 4, @14, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @8
+; EG-NEXT: ALU 1, @19, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @10
+; EG-NEXT: ALU 9, @21, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 8:
+; EG-NEXT: VTX_READ_8 T1.X, T1.X, 0, #1
+; EG-NEXT: Fetch clause starting at 10:
+; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; EG-NEXT: VTX_READ_8 T2.X, T2.X, 0, #1
+; EG-NEXT: ALU clause starting at 14:
+; EG-NEXT: LSHR * T0.W, KC0[3].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; EG-NEXT: MOVA_INT * AR.x (MASKED), PV.W,
+; EG-NEXT: MOV * T0.X, T(0 + AR.x).X+,
+; EG-NEXT: ADD_INT * T1.X, KC0[2].W, PV.X,
+; EG-NEXT: ALU clause starting at 19:
+; EG-NEXT: ADD_INT T2.X, KC0[2].Z, T0.X,
+; EG-NEXT: ADD_INT * T0.X, KC0[3].X, T0.X,
+; EG-NEXT: ALU clause starting at 21:
+; EG-NEXT: BFE_INT T0.Z, T1.X, 0.0, literal.x,
+; EG-NEXT: BFE_INT * T0.W, T2.X, 0.0, literal.x, BS:VEC_120/SCL_212
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_INT T1.W, T0.X, 0.0, literal.x,
+; EG-NEXT: MULLO_INT * T0.X, PV.W, PV.Z,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
+; EG-NEXT: BFE_INT T0.X, PV.W, 0.0, literal.x,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
+; EG-NEXT: 16(2.242078e-44), 2(2.802597e-45)
+;
+; CM-LABEL: i8_mad_32:
+; CM: ; %bb.0: ; %entry
+; CM-NEXT: ALU 4, @14, KC0[CB0:0-32], KC1[]
+; CM-NEXT: TEX 0 @8
+; CM-NEXT: ALU 1, @19, KC0[CB0:0-32], KC1[]
+; CM-NEXT: TEX 1 @10
+; CM-NEXT: ALU 12, @21, KC0[CB0:0-32], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T1.X
+; CM-NEXT: CF_END
+; CM-NEXT: PAD
+; CM-NEXT: Fetch clause starting at 8:
+; CM-NEXT: VTX_READ_8 T1.X, T1.X, 0, #1
+; CM-NEXT: Fetch clause starting at 10:
+; CM-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; CM-NEXT: VTX_READ_8 T2.X, T2.X, 0, #1
+; CM-NEXT: ALU clause starting at 14:
+; CM-NEXT: LSHR * T0.W, KC0[3].Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; CM-NEXT: MOVA_INT * AR.x (MASKED), PV.W,
+; CM-NEXT: MOV * T0.X, T(0 + AR.x).X+,
+; CM-NEXT: ADD_INT * T1.X, KC0[3].X, PV.X,
+; CM-NEXT: ALU clause starting at 19:
+; CM-NEXT: ADD_INT * T2.X, KC0[2].W, T0.X,
+; CM-NEXT: ADD_INT * T0.X, KC0[2].Z, T0.X,
+; CM-NEXT: ALU clause starting at 21:
+; CM-NEXT: BFE_INT T0.Y, T1.X, 0.0, literal.x,
+; CM-NEXT: BFE_INT T0.Z, T2.X, 0.0, literal.x, BS:VEC_120/SCL_212
+; CM-NEXT: BFE_INT * T0.W, T0.X, 0.0, literal.x, BS:VEC_201
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X, T0.W, T0.Z,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.W, T0.Z,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.W, T0.Z,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.W, T0.Z,
+; CM-NEXT: ADD_INT * T0.W, PV.X, T0.Y,
+; CM-NEXT: BFE_INT * T0.X, PV.W, 0.0, literal.x,
+; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; CM-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; GCN-LABEL: i8_mad_32:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_mov_b32 s24, SCRATCH_RSRC_DWORD0
+; GCN-NEXT: s_mov_b32 s25, SCRATCH_RSRC_DWORD1
+; GCN-NEXT: s_mov_b32 s26, -1
+; GCN-NEXT: s_mov_b32 s27, 0xe8f000
+; GCN-NEXT: s_add_u32 s24, s24, s11
+; GCN-NEXT: s_addc_u32 s25, s25, 0
+; GCN-NEXT: s_load_dword s8, s[4:5], 0x11
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_add_i32 s9, s8, 4
+; GCN-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; GCN-NEXT: v_mov_b32_e32 v0, s8
+; GCN-NEXT: v_mov_b32_e32 v1, s9
+; GCN-NEXT: buffer_load_dword v1, v1, s[24:27], 0 offen
+; GCN-NEXT: buffer_load_dword v0, v0, s[24:27], 0 offen
+; GCN-NEXT: s_mov_b32 s11, 0xf000
+; GCN-NEXT: s_mov_b32 s14, 0
+; GCN-NEXT: s_mov_b32 s15, s11
+; GCN-NEXT: s_mov_b64 s[18:19], s[14:15]
+; GCN-NEXT: s_mov_b64 s[22:23], s[14:15]
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_mov_b64 s[12:13], s[2:3]
+; GCN-NEXT: s_mov_b64 s[16:17], s[4:5]
+; GCN-NEXT: s_mov_b64 s[20:21], s[6:7]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_sbyte v2, v[0:1], s[12:15], 0 addr64
+; GCN-NEXT: buffer_load_sbyte v3, v[0:1], s[16:19], 0 addr64
+; GCN-NEXT: buffer_load_sbyte v0, v[0:1], s[20:23], 0 addr64
+; GCN-NEXT: s_mov_b32 s10, -1
+; GCN-NEXT: s_mov_b32 s8, s0
+; GCN-NEXT: s_mov_b32 s9, s1
+; GCN-NEXT: s_waitcnt vmcnt(2)
+; GCN-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mad_u32_u24 v0, v1, v2, v0
+; GCN-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GCN-NEXT: buffer_store_dword v0, off, s[8:11], 0
+; GCN-NEXT: s_endpgm
+;
+; SI-LABEL: i8_mad_32:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_mov_b32 s88, SCRATCH_RSRC_DWORD0
+; SI-NEXT: s_load_dword s0, s[4:5], 0x44
+; SI-NEXT: s_mov_b32 s89, SCRATCH_RSRC_DWORD1
+; SI-NEXT: s_mov_b32 s90, -1
+; SI-NEXT: s_mov_b32 s91, 0xe80000
+; SI-NEXT: s_add_u32 s88, s88, s11
+; SI-NEXT: s_addc_u32 s89, s89, 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_add_i32 s1, s0, 4
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: buffer_load_dword v4, v0, s[88:91], 0 offen
+; SI-NEXT: v_mov_b32_e32 v0, s1
+; SI-NEXT: buffer_load_dword v5, v0, s[88:91], 0 offen
+; SI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_mov_b32_e32 v6, s7
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
+; SI-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; SI-NEXT: v_addc_u32_e32 v3, vcc, v3, v5, vcc
+; SI-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; SI-NEXT: v_addc_u32_e32 v5, vcc, v6, v5, vcc
+; SI-NEXT: flat_load_sbyte v0, v[0:1]
+; SI-NEXT: flat_load_sbyte v1, v[2:3]
+; SI-NEXT: flat_load_sbyte v2, v[4:5]
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mad_u16 v0, v0, v1, v2
+; SI-NEXT: v_bfe_i32 v0, v0, 0, 16
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: i8_mad_32:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; VI-NEXT: s_load_dword s0, s[4:5], 0x44
+; VI-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; VI-NEXT: s_mov_b32 s14, -1
+; VI-NEXT: s_mov_b32 s15, 0xe80000
+; VI-NEXT: s_add_u32 s12, s12, s11
+; VI-NEXT: s_addc_u32 s13, s13, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_add_i32 s1, s0, 4
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: buffer_load_dword v4, v0, s[12:15], 0 offen
+; VI-NEXT: v_mov_b32_e32 v0, s1
+; VI-NEXT: buffer_load_dword v5, v0, s[12:15], 0 offen
+; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_mov_b32_e32 v6, s7
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT: v_addc_u32_e32 v3, vcc, v3, v5, vcc
+; VI-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-NEXT: v_addc_u32_e32 v5, vcc, v6, v5, vcc
+; VI-NEXT: flat_load_sbyte v0, v[0:1]
+; VI-NEXT: flat_load_sbyte v1, v[2:3]
+; VI-NEXT: flat_load_sbyte v2, v[4:5]
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mad_u16 v0, v0, v1, v2
+; VI-NEXT: v_bfe_i32 v0, v0, 0, 16
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
entry:
%retval.0.i = load i64, ptr addrspace(5) %idx
%arrayidx = getelementptr inbounds i8, ptr addrspace(1) %a, i64 %retval.0.i
@@ -215,16 +1125,207 @@ entry:
ret void
}
-; FUNC-LABEL: {{^}}i8_mad_64:
-; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
-; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
-; The result must be sign-extended
-; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
-; EG: 8
-; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; VI: v_mad_u16 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; GCN: v_bfe_i32 [[EXT:v[0-9]]], [[MAD]], 0, 16
define amdgpu_kernel void @i8_mad_64(ptr addrspace(1) %out, ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(5) %idx) {
+; EG-LABEL: i8_mad_64:
+; EG: ; %bb.0: ; %entry
+; EG-NEXT: ALU 4, @14, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @8
+; EG-NEXT: ALU 1, @19, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @10
+; EG-NEXT: ALU 11, @21, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 8:
+; EG-NEXT: VTX_READ_8 T1.X, T1.X, 0, #1
+; EG-NEXT: Fetch clause starting at 10:
+; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; EG-NEXT: VTX_READ_8 T2.X, T2.X, 0, #1
+; EG-NEXT: ALU clause starting at 14:
+; EG-NEXT: LSHR * T0.W, KC0[3].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; EG-NEXT: MOVA_INT * AR.x (MASKED), PV.W,
+; EG-NEXT: MOV * T0.X, T(0 + AR.x).X+,
+; EG-NEXT: ADD_INT * T1.X, KC0[2].W, PV.X,
+; EG-NEXT: ALU clause starting at 19:
+; EG-NEXT: ADD_INT T2.X, KC0[2].Z, T0.X,
+; EG-NEXT: ADD_INT * T0.X, KC0[3].X, T0.X,
+; EG-NEXT: ALU clause starting at 21:
+; EG-NEXT: BFE_INT T0.Z, T1.X, 0.0, literal.x,
+; EG-NEXT: BFE_INT * T0.W, T2.X, 0.0, literal.x, BS:VEC_120/SCL_212
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_INT T1.W, T0.X, 0.0, literal.x,
+; EG-NEXT: MULLO_INT * T0.X, PV.W, PV.Z,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
+; EG-NEXT: BFE_INT T0.X, PV.W, 0.0, literal.x,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
+; EG-NEXT: 16(2.242078e-44), 2(2.802597e-45)
+; EG-NEXT: ASHR * T0.Y, PV.X, literal.x,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+;
+; CM-LABEL: i8_mad_64:
+; CM: ; %bb.0: ; %entry
+; CM-NEXT: ALU 4, @14, KC0[CB0:0-32], KC1[]
+; CM-NEXT: TEX 0 @8
+; CM-NEXT: ALU 1, @19, KC0[CB0:0-32], KC1[]
+; CM-NEXT: TEX 1 @10
+; CM-NEXT: ALU 13, @21, KC0[CB0:0-32], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0, T1.X
+; CM-NEXT: CF_END
+; CM-NEXT: PAD
+; CM-NEXT: Fetch clause starting at 8:
+; CM-NEXT: VTX_READ_8 T1.X, T1.X, 0, #1
+; CM-NEXT: Fetch clause starting at 10:
+; CM-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; CM-NEXT: VTX_READ_8 T2.X, T2.X, 0, #1
+; CM-NEXT: ALU clause starting at 14:
+; CM-NEXT: LSHR * T0.W, KC0[3].Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; CM-NEXT: MOVA_INT * AR.x (MASKED), PV.W,
+; CM-NEXT: MOV * T0.X, T(0 + AR.x).X+,
+; CM-NEXT: ADD_INT * T1.X, KC0[3].X, PV.X,
+; CM-NEXT: ALU clause starting at 19:
+; CM-NEXT: ADD_INT * T2.X, KC0[2].W, T0.X,
+; CM-NEXT: ADD_INT * T0.X, KC0[2].Z, T0.X,
+; CM-NEXT: ALU clause starting at 21:
+; CM-NEXT: BFE_INT T0.Y, T1.X, 0.0, literal.x,
+; CM-NEXT: BFE_INT T0.Z, T2.X, 0.0, literal.x, BS:VEC_120/SCL_212
+; CM-NEXT: BFE_INT * T0.W, T0.X, 0.0, literal.x, BS:VEC_201
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X, T0.W, T0.Z,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.W, T0.Z,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.W, T0.Z,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.W, T0.Z,
+; CM-NEXT: ADD_INT * T0.W, PV.X, T0.Y,
+; CM-NEXT: BFE_INT * T0.X, PV.W, 0.0, literal.x,
+; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; CM-NEXT: LSHR T1.X, KC0[2].Y, literal.x,
+; CM-NEXT: ASHR * T0.Y, PV.X, literal.y,
+; CM-NEXT: 2(2.802597e-45), 31(4.344025e-44)
+;
+; GCN-LABEL: i8_mad_64:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_mov_b32 s24, SCRATCH_RSRC_DWORD0
+; GCN-NEXT: s_mov_b32 s25, SCRATCH_RSRC_DWORD1
+; GCN-NEXT: s_mov_b32 s26, -1
+; GCN-NEXT: s_mov_b32 s27, 0xe8f000
+; GCN-NEXT: s_add_u32 s24, s24, s11
+; GCN-NEXT: s_addc_u32 s25, s25, 0
+; GCN-NEXT: s_load_dword s8, s[4:5], 0x11
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_add_i32 s9, s8, 4
+; GCN-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; GCN-NEXT: v_mov_b32_e32 v0, s8
+; GCN-NEXT: v_mov_b32_e32 v1, s9
+; GCN-NEXT: buffer_load_dword v1, v1, s[24:27], 0 offen
+; GCN-NEXT: buffer_load_dword v0, v0, s[24:27], 0 offen
+; GCN-NEXT: s_mov_b32 s11, 0xf000
+; GCN-NEXT: s_mov_b32 s14, 0
+; GCN-NEXT: s_mov_b32 s15, s11
+; GCN-NEXT: s_mov_b64 s[18:19], s[14:15]
+; GCN-NEXT: s_mov_b64 s[22:23], s[14:15]
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_mov_b64 s[12:13], s[2:3]
+; GCN-NEXT: s_mov_b64 s[16:17], s[4:5]
+; GCN-NEXT: s_mov_b64 s[20:21], s[6:7]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_sbyte v2, v[0:1], s[12:15], 0 addr64
+; GCN-NEXT: buffer_load_sbyte v3, v[0:1], s[16:19], 0 addr64
+; GCN-NEXT: buffer_load_sbyte v0, v[0:1], s[20:23], 0 addr64
+; GCN-NEXT: s_mov_b32 s10, -1
+; GCN-NEXT: s_mov_b32 s8, s0
+; GCN-NEXT: s_mov_b32 s9, s1
+; GCN-NEXT: s_waitcnt vmcnt(2)
+; GCN-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mad_u32_u24 v0, v1, v2, v0
+; GCN-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GCN-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-NEXT: s_endpgm
+;
+; SI-LABEL: i8_mad_64:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_mov_b32 s88, SCRATCH_RSRC_DWORD0
+; SI-NEXT: s_load_dword s0, s[4:5], 0x44
+; SI-NEXT: s_mov_b32 s89, SCRATCH_RSRC_DWORD1
+; SI-NEXT: s_mov_b32 s90, -1
+; SI-NEXT: s_mov_b32 s91, 0xe80000
+; SI-NEXT: s_add_u32 s88, s88, s11
+; SI-NEXT: s_addc_u32 s89, s89, 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_add_i32 s1, s0, 4
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: buffer_load_dword v4, v0, s[88:91], 0 offen
+; SI-NEXT: v_mov_b32_e32 v0, s1
+; SI-NEXT: buffer_load_dword v5, v0, s[88:91], 0 offen
+; SI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_mov_b32_e32 v6, s7
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
+; SI-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; SI-NEXT: v_addc_u32_e32 v3, vcc, v3, v5, vcc
+; SI-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; SI-NEXT: v_addc_u32_e32 v5, vcc, v6, v5, vcc
+; SI-NEXT: flat_load_sbyte v0, v[0:1]
+; SI-NEXT: flat_load_sbyte v1, v[2:3]
+; SI-NEXT: flat_load_sbyte v2, v[4:5]
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mad_u16 v0, v0, v1, v2
+; SI-NEXT: v_bfe_i32 v0, v0, 0, 16
+; SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: i8_mad_64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; VI-NEXT: s_load_dword s0, s[4:5], 0x44
+; VI-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; VI-NEXT: s_mov_b32 s14, -1
+; VI-NEXT: s_mov_b32 s15, 0xe80000
+; VI-NEXT: s_add_u32 s12, s12, s11
+; VI-NEXT: s_addc_u32 s13, s13, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_add_i32 s1, s0, 4
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: buffer_load_dword v4, v0, s[12:15], 0 offen
+; VI-NEXT: v_mov_b32_e32 v0, s1
+; VI-NEXT: buffer_load_dword v5, v0, s[12:15], 0 offen
+; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_mov_b32_e32 v6, s7
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT: v_addc_u32_e32 v3, vcc, v3, v5, vcc
+; VI-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-NEXT: v_addc_u32_e32 v5, vcc, v6, v5, vcc
+; VI-NEXT: flat_load_sbyte v0, v[0:1]
+; VI-NEXT: flat_load_sbyte v1, v[2:3]
+; VI-NEXT: flat_load_sbyte v2, v[4:5]
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mad_u16 v0, v0, v1, v2
+; VI-NEXT: v_bfe_i32 v0, v0, 0, 16
+; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; VI-NEXT: s_endpgm
entry:
%retval.0.i = load i64, ptr addrspace(5) %idx
%arrayidx = getelementptr inbounds i8, ptr addrspace(1) %a, i64 %retval.0.i
@@ -248,17 +1349,236 @@ entry:
; had a chance to form mul24. The mul combine would then see
; extractelement with no known bits and fail. All of the mul/add
; combos in this loop should form v_mad_u32_u24.
-
-; FUNC-LABEL: {{^}}mad24_known_bits_destroyed:
-; GCN: v_mad_u32_u24
-; GCN: v_mad_u32_u24
-; GCN: v_mad_u32_u24
-; GCN: v_mad_u32_u24
-; GCN: v_mad_u32_u24
-; GCN: v_mad_u32_u24
-; GCN: v_mad_u32_u24
-; GCN: v_mad_u32_u24
define void @mad24_known_bits_destroyed(i32 %arg, <4 x i32> %arg1, <4 x i32> %arg2, <4 x i32> %arg3, i32 %arg4, i32 %arg5, i32 %arg6, ptr addrspace(1) %arg7, ptr addrspace(1) %arg8) #0 {
+; EG-LABEL: mad24_known_bits_destroyed:
+; EG: ; %bb.0: ; %bb
+; EG-NEXT: ALU 21, @12, KC0[CB0:0-32], KC1[]
+; EG-NEXT: LOOP_START_DX10 @11
+; EG-NEXT: ALU 8, @34, KC0[], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T2.X, 0
+; EG-NEXT: ALU 14, @43, KC0[], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T1.X, 0
+; EG-NEXT: ALU_PUSH_BEFORE 3, @58, KC0[], KC1[]
+; EG-NEXT: JUMP @10 POP:1
+; EG-NEXT: LOOP_BREAK @10
+; EG-NEXT: POP @10 POP:1
+; EG-NEXT: END_LOOP @2
+; EG-NEXT: CF_END
+; EG-NEXT: ALU clause starting at 12:
+; EG-NEXT: MOV * T0.W, KC0[5].X,
+; EG-NEXT: MOV * T0.Z, KC0[4].W,
+; EG-NEXT: MOV * T0.Y, KC0[4].Z,
+; EG-NEXT: MOV T0.X, KC0[2].Y,
+; EG-NEXT: AND_INT * T1.Y, KC0[4].X, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.Z, KC0[3].W, literal.x,
+; EG-NEXT: AND_INT T1.W, KC0[3].Z, literal.x,
+; EG-NEXT: MOV * T2.W, KC0[7].Y,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: LSHR T1.X, PS, literal.x,
+; EG-NEXT: AND_INT T2.Y, KC0[6].Y, literal.y,
+; EG-NEXT: MOV T2.Z, KC0[6].X,
+; EG-NEXT: MOV * T2.W, KC0[5].W,
+; EG-NEXT: 2(2.802597e-45), 16777215(2.350989e-38)
+; EG-NEXT: MOV * T3.W, KC0[7].X,
+; EG-NEXT: LSHR T2.X, PV.W, literal.x,
+; EG-NEXT: MOV T3.Y, KC0[5].Z,
+; EG-NEXT: MOV T3.Z, KC0[6].Z,
+; EG-NEXT: MOV * T3.W, KC0[6].W,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; EG-NEXT: MOV * T4.W, KC0[4].Y,
+; EG-NEXT: ALU clause starting at 34:
+; EG-NEXT: MULLO_INT * T0.X, T0.X, T2.Y,
+; EG-NEXT: ADD_INT * T4.W, PS, T3.Z,
+; EG-NEXT: AND_INT * T4.W, PV.W, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: MULLO_INT * T0.X, PV.W, T2.Y,
+; EG-NEXT: MULLO_INT * T0.W, T0.W, T1.Y,
+; EG-NEXT: MULLO_INT * T0.Z, T0.Z, T1.Z,
+; EG-NEXT: MULLO_INT * T0.Y, T0.Y, T1.W,
+; EG-NEXT: ADD_INT * T0.X, T0.X, T3.Z,
+; EG-NEXT: ALU clause starting at 43:
+; EG-NEXT: ADD_INT * T4.W, T0.Y, T3.Y,
+; EG-NEXT: AND_INT T4.W, PV.W, literal.x,
+; EG-NEXT: ADD_INT * T5.W, T0.Z, T2.W,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: AND_INT T0.Z, PS, literal.x,
+; EG-NEXT: ADD_INT T0.W, T0.W, T2.Z,
+; EG-NEXT: MULLO_INT * T0.Y, PV.W, T1.W,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: ADD_INT T0.Y, PS, T3.Y,
+; EG-NEXT: AND_INT T0.W, PV.W, literal.x,
+; EG-NEXT: MULLO_INT * T0.Z, PV.Z, T1.Z,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: ADD_INT T0.Z, PS, T2.W,
+; EG-NEXT: MULLO_INT * T0.W, PV.W, T1.Y,
+; EG-NEXT: ADD_INT * T0.W, PS, T2.Z,
+; EG-NEXT: ALU clause starting at 58:
+; EG-NEXT: ADD_INT * T3.W, T3.W, literal.x,
+; EG-NEXT: -1(nan), 0(0.000000e+00)
+; EG-NEXT: SETE_INT * T4.W, PV.W, 0.0,
+; EG-NEXT: PRED_SETNE_INT * ExecMask,PredicateBit (MASKED), PV.W, 0.0,
+;
+; CM-LABEL: mad24_known_bits_destroyed:
+; CM: ; %bb.0: ; %bb
+; CM-NEXT: ALU 22, @12, KC0[CB0:0-32], KC1[]
+; CM-NEXT: LOOP_START_DX10 @11
+; CM-NEXT: ALU 23, @35, KC0[], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T2.X
+; CM-NEXT: ALU 23, @59, KC0[], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0, T1.X
+; CM-NEXT: ALU_PUSH_BEFORE 3, @83, KC0[], KC1[]
+; CM-NEXT: JUMP @10 POP:1
+; CM-NEXT: LOOP_BREAK @10
+; CM-NEXT: POP @10 POP:1
+; CM-NEXT: END_LOOP @2
+; CM-NEXT: CF_END
+; CM-NEXT: ALU clause starting at 12:
+; CM-NEXT: MOV * T0.W, KC0[5].X,
+; CM-NEXT: MOV * T0.Z, KC0[4].W,
+; CM-NEXT: MOV * T0.Y, KC0[4].Z,
+; CM-NEXT: MOV T0.X, KC0[2].Y,
+; CM-NEXT: AND_INT * T1.Y, KC0[4].X, literal.x,
+; CM-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; CM-NEXT: AND_INT T1.Z, KC0[3].W, literal.x,
+; CM-NEXT: AND_INT * T1.W, KC0[3].Z, literal.x,
+; CM-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; CM-NEXT: AND_INT T2.Y, KC0[6].Y, literal.x,
+; CM-NEXT: MOV T2.Z, KC0[6].X,
+; CM-NEXT: MOV * T2.W, KC0[7].Y,
+; CM-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; CM-NEXT: LSHR T1.X, PV.W, literal.x,
+; CM-NEXT: MOV T3.Y, KC0[5].W,
+; CM-NEXT: MOV T3.Z, KC0[5].Z,
+; CM-NEXT: MOV * T2.W, KC0[7].X,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; CM-NEXT: LSHR T2.X, PV.W, literal.x,
+; CM-NEXT: MOV T4.Y, KC0[6].Z,
+; CM-NEXT: MOV T4.Z, KC0[6].W,
+; CM-NEXT: MOV * T2.W, KC0[4].Y,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; CM-NEXT: ALU clause starting at 35:
+; CM-NEXT: MULLO_INT T0.X, T0.X, T2.Y,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.X, T2.Y,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.X, T2.Y,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.X, T2.Y,
+; CM-NEXT: ADD_INT * T2.W, PV.X, T4.Y,
+; CM-NEXT: AND_INT * T2.W, PV.W, literal.x,
+; CM-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X, T2.W, T2.Y,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T2.W, T2.Y,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T2.W, T2.Y,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T2.W, T2.Y,
+; CM-NEXT: MULLO_INT T0.X (MASKED), T0.W, T1.Y,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.W, T1.Y,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.W, T1.Y,
+; CM-NEXT: MULLO_INT * T0.W, T0.W, T1.Y,
+; CM-NEXT: MULLO_INT T0.X (MASKED), T0.Z, T1.Z,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.Z, T1.Z,
+; CM-NEXT: MULLO_INT T0.Z, T0.Z, T1.Z,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.Z, T1.Z,
+; CM-NEXT: MULLO_INT T0.X (MASKED), T0.Y, T1.W,
+; CM-NEXT: MULLO_INT T0.Y, T0.Y, T1.W,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.Y, T1.W,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.Y, T1.W,
+; CM-NEXT: ADD_INT * T0.X, T0.X, T4.Y,
+; CM-NEXT: ALU clause starting at 59:
+; CM-NEXT: ADD_INT * T2.W, T0.Y, T3.Z,
+; CM-NEXT: ADD_INT T0.Z, T0.Z, T3.Y,
+; CM-NEXT: AND_INT * T2.W, PV.W, literal.x,
+; CM-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X (MASKED), T2.W, T1.W,
+; CM-NEXT: MULLO_INT T0.Y, T2.W, T1.W,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T2.W, T1.W,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T2.W, T1.W,
+; CM-NEXT: ADD_INT T0.Y, PV.Y, T3.Z,
+; CM-NEXT: ADD_INT T5.Z, T0.W, T2.Z, BS:VEC_021/SCL_122
+; CM-NEXT: AND_INT * T0.W, T0.Z, literal.x,
+; CM-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X (MASKED), T0.W, T1.Z,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.W, T1.Z,
+; CM-NEXT: MULLO_INT T0.Z, T0.W, T1.Z,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.W, T1.Z,
+; CM-NEXT: ADD_INT T0.Z, PV.Z, T3.Y,
+; CM-NEXT: AND_INT * T0.W, T5.Z, literal.x,
+; CM-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X (MASKED), T0.W, T1.Y,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.W, T1.Y,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.W, T1.Y,
+; CM-NEXT: MULLO_INT * T0.W, T0.W, T1.Y,
+; CM-NEXT: ADD_INT * T0.W, PV.W, T2.Z,
+; CM-NEXT: ALU clause starting at 83:
+; CM-NEXT: ADD_INT * T4.Z, T4.Z, literal.x,
+; CM-NEXT: -1(nan), 0(0.000000e+00)
+; CM-NEXT: SETE_INT * T2.W, PV.Z, 0.0,
+; CM-NEXT: PRED_SETNE_INT * ExecMask,PredicateBit (MASKED), PV.W, 0.0,
+;
+; GCN-LABEL: mad24_known_bits_destroyed:
+; GCN: ; %bb.0: ; %bb
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v5, v0
+; GCN-NEXT: v_and_b32_e32 v0, 0xffffff, v13
+; GCN-NEXT: v_and_b32_e32 v1, 0xffffff, v2
+; GCN-NEXT: v_and_b32_e32 v2, 0xffffff, v3
+; GCN-NEXT: v_and_b32_e32 v3, 0xffffff, v4
+; GCN-NEXT: s_mov_b64 s[8:9], 0
+; GCN-NEXT: s_mov_b32 s6, 0
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_mov_b32 s4, s6
+; GCN-NEXT: s_mov_b32 s5, s6
+; GCN-NEXT: .LBB9_1: ; %bb19
+; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN-NEXT: v_mad_u32_u24 v4, v5, v0, v14
+; GCN-NEXT: s_waitcnt expcnt(0)
+; GCN-NEXT: v_mad_u32_u24 v6, v6, v1, v10
+; GCN-NEXT: v_mad_u32_u24 v7, v7, v2, v11
+; GCN-NEXT: v_mad_u32_u24 v8, v8, v3, v12
+; GCN-NEXT: v_add_i32_e32 v15, vcc, -1, v15
+; GCN-NEXT: v_mad_u32_u24 v5, v4, v0, v14
+; GCN-NEXT: v_mad_u32_u24 v6, v6, v1, v10
+; GCN-NEXT: v_mad_u32_u24 v7, v7, v2, v11
+; GCN-NEXT: v_mad_u32_u24 v8, v8, v3, v12
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v15
+; GCN-NEXT: buffer_store_dword v5, v[16:17], s[4:7], 0 addr64
+; GCN-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-NEXT: buffer_store_dwordx4 v[5:8], v[18:19], s[4:7], 0 addr64
+; GCN-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GCN-NEXT: s_cbranch_execnz .LBB9_1
+; GCN-NEXT: ; %bb.2: ; %bb18
+; GCN-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: mad24_known_bits_destroyed:
+; GFX8: ; %bb.0: ; %bb
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, v0
+; GFX8-NEXT: v_and_b32_e32 v0, 0xffffff, v13
+; GFX8-NEXT: v_and_b32_e32 v1, 0xffffff, v2
+; GFX8-NEXT: v_and_b32_e32 v2, 0xffffff, v3
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffffff, v4
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: .LBB9_1: ; %bb19
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: v_add_u32_e32 v15, vcc, -1, v15
+; GFX8-NEXT: v_mad_u32_u24 v4, v5, v0, v14
+; GFX8-NEXT: v_mad_u32_u24 v6, v6, v1, v10
+; GFX8-NEXT: v_mad_u32_u24 v7, v7, v2, v11
+; GFX8-NEXT: v_mad_u32_u24 v8, v8, v3, v12
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v15
+; GFX8-NEXT: v_mad_u32_u24 v5, v4, v0, v14
+; GFX8-NEXT: v_mad_u32_u24 v6, v6, v1, v10
+; GFX8-NEXT: v_mad_u32_u24 v7, v7, v2, v11
+; GFX8-NEXT: v_mad_u32_u24 v8, v8, v3, v12
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: flat_store_dword v[16:17], v5
+; GFX8-NEXT: flat_store_dwordx4 v[18:19], v[5:8]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB9_1
+; GFX8-NEXT: ; %bb.2: ; %bb18
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: s_setpc_b64 s[30:31]
bb:
%tmp = and i32 %arg4, 16777215
%tmp9 = extractelement <4 x i32> %arg1, i64 1
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index 697bcc3..5f6d622 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -206,8 +206,11 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: s_add_u32 s18, s16, 1
-; GCN-IR-NEXT: s_addc_u32 s19, s17, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[18:19], 0
+; GCN-IR-NEXT: s_cselect_b64 s[10:11], -1, 0
+; GCN-IR-NEXT: s_or_b32 s10, s10, s11
+; GCN-IR-NEXT: s_cmp_lg_u32 s10, 0
+; GCN-IR-NEXT: s_addc_u32 s10, s17, 0
+; GCN-IR-NEXT: s_cselect_b64 s[10:11], -1, 0
; GCN-IR-NEXT: s_sub_i32 s16, 63, s16
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11]
; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[12:13], s16
@@ -217,9 +220,9 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-IR-NEXT: s_add_u32 s18, s2, -1
; GCN-IR-NEXT: s_addc_u32 s19, s3, -1
; GCN-IR-NEXT: s_not_b64 s[8:9], s[14:15]
-; GCN-IR-NEXT: s_add_u32 s12, s8, s20
-; GCN-IR-NEXT: s_addc_u32 s13, s9, 0
-; GCN-IR-NEXT: s_mov_b64 s[14:15], 0
+; GCN-IR-NEXT: s_add_u32 s14, s8, s20
+; GCN-IR-NEXT: s_addc_u32 s15, s9, 0
+; GCN-IR-NEXT: s_mov_b64 s[12:13], 0
; GCN-IR-NEXT: s_mov_b32 s9, 0
; GCN-IR-NEXT: .LBB0_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -227,19 +230,22 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-IR-NEXT: s_lshr_b32 s8, s11, 31
; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[10:11], 1
; GCN-IR-NEXT: s_or_b64 s[16:17], s[16:17], s[8:9]
-; GCN-IR-NEXT: s_or_b64 s[10:11], s[14:15], s[10:11]
+; GCN-IR-NEXT: s_or_b64 s[10:11], s[12:13], s[10:11]
; GCN-IR-NEXT: s_sub_u32 s8, s18, s16
; GCN-IR-NEXT: s_subb_u32 s8, s19, s17
-; GCN-IR-NEXT: s_ashr_i32 s14, s8, 31
-; GCN-IR-NEXT: s_mov_b32 s15, s14
-; GCN-IR-NEXT: s_and_b32 s8, s14, 1
-; GCN-IR-NEXT: s_and_b64 s[14:15], s[14:15], s[2:3]
-; GCN-IR-NEXT: s_sub_u32 s16, s16, s14
-; GCN-IR-NEXT: s_subb_u32 s17, s17, s15
-; GCN-IR-NEXT: s_add_u32 s12, s12, 1
-; GCN-IR-NEXT: s_addc_u32 s13, s13, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[20:21], s[12:13], 0
-; GCN-IR-NEXT: s_mov_b64 s[14:15], s[8:9]
+; GCN-IR-NEXT: s_ashr_i32 s12, s8, 31
+; GCN-IR-NEXT: s_mov_b32 s13, s12
+; GCN-IR-NEXT: s_and_b32 s8, s12, 1
+; GCN-IR-NEXT: s_and_b64 s[20:21], s[12:13], s[2:3]
+; GCN-IR-NEXT: s_sub_u32 s16, s16, s20
+; GCN-IR-NEXT: s_subb_u32 s17, s17, s21
+; GCN-IR-NEXT: s_add_u32 s14, s14, 1
+; GCN-IR-NEXT: s_cselect_b64 s[20:21], -1, 0
+; GCN-IR-NEXT: s_or_b32 s20, s20, s21
+; GCN-IR-NEXT: s_cmp_lg_u32 s20, 0
+; GCN-IR-NEXT: s_addc_u32 s15, s15, 0
+; GCN-IR-NEXT: s_cselect_b64 s[20:21], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[12:13], s[8:9]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[20:21]
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_3
; GCN-IR-NEXT: .LBB0_4: ; %Flow7
@@ -389,25 +395,25 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
; GCN-IR-LABEL: v_test_sdiv:
; GCN-IR: ; %bb.0: ; %_udiv-special-cases
; GCN-IR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v13, 31, v3
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v0, v12
-; GCN-IR-NEXT: v_subb_u32_e32 v7, vcc, v1, v12, vcc
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v2, v13
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v3, v13
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v13
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v11, 31, v3
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v10
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v0, v10
+; GCN-IR-NEXT: v_subb_u32_e32 v7, vcc, v1, v10, vcc
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v2, v11
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v3, v11
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v11
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v11, vcc
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e64 v2, s[6:7], 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT: v_min_u32_e32 v8, v2, v3
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v6
; GCN-IR-NEXT: v_add_i32_e64 v2, s[6:7], 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v7
-; GCN-IR-NEXT: v_min_u32_e32 v11, v2, v3
-; GCN-IR-NEXT: v_sub_i32_e64 v2, s[6:7], v10, v11
+; GCN-IR-NEXT: v_min_u32_e32 v9, v2, v3
+; GCN-IR-NEXT: v_sub_i32_e64 v2, s[6:7], v8, v9
; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[6:7]
; GCN-IR-NEXT: v_subb_u32_e64 v3, s[6:7], 0, 0, s[6:7]
@@ -416,70 +422,69 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[2:3]
; GCN-IR-NEXT: s_xor_b64 s[6:7], s[4:5], -1
-; GCN-IR-NEXT: v_mov_b32_e32 v14, v12
-; GCN-IR-NEXT: v_mov_b32_e32 v15, v13
+; GCN-IR-NEXT: v_mov_b32_e32 v12, v10
+; GCN-IR-NEXT: v_mov_b32_e32 v13, v11
; GCN-IR-NEXT: v_cndmask_b32_e64 v5, v7, 0, s[4:5]
; GCN-IR-NEXT: v_cndmask_b32_e64 v4, v6, 0, s[4:5]
; GCN-IR-NEXT: s_and_b64 s[4:5], s[6:7], vcc
; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; GCN-IR-NEXT: s_cbranch_execz .LBB1_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v3, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, 1, v2
+; GCN-IR-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[6:7], v2
+; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB1_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_add_i32_e32 v16, vcc, -1, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v17, vcc, -1, v1, vcc
-; GCN-IR-NEXT: v_not_b32_e32 v4, v10
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], v[6:7], v8
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, v4, v11
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT: v_addc_u32_e64 v7, s[4:5], -1, 0, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], v[6:7], v14
+; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, -1, v0
+; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, -1, v1, vcc
+; GCN-IR-NEXT: v_not_b32_e32 v4, v8
+; GCN-IR-NEXT: v_add_i32_e32 v16, vcc, v4, v9
+; GCN-IR-NEXT: v_addc_u32_e64 v17, s[8:9], -1, 0, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
; GCN-IR-NEXT: .LBB1_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v16, v8
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v17, v9, vcc
-; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v14, v6
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v15, v7, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT: v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v16, vcc, 1, v16
+; GCN-IR-NEXT: v_addc_u32_e32 v17, vcc, 0, v17, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB1_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB1_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB1_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v1
; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v0
; GCN-IR-NEXT: .LBB1_6: ; %Flow5
; GCN-IR-NEXT: s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v13, v12
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v15, v14
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v11, v10
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v13, v12
; GCN-IR-NEXT: v_xor_b32_e32 v3, v4, v0
; GCN-IR-NEXT: v_xor_b32_e32 v2, v5, v1
; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v3, v0
@@ -1293,34 +1298,37 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_xor_b64 s[2:3], s[2:3], s[4:5]
; GCN-IR-NEXT: s_sub_u32 s2, s2, s4
; GCN-IR-NEXT: s_subb_u32 s3, s3, s4
-; GCN-IR-NEXT: s_flbit_i32_b64 s14, s[2:3]
-; GCN-IR-NEXT: s_add_u32 s10, s14, 0xffffffc5
+; GCN-IR-NEXT: s_flbit_i32_b64 s16, s[2:3]
+; GCN-IR-NEXT: s_add_u32 s10, s16, 0xffffffc5
; GCN-IR-NEXT: s_addc_u32 s11, 0, -1
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[2:3], 0
; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[12:13], s[10:11], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[10:11], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[14:15], s[10:11], 63
; GCN-IR-NEXT: s_or_b64 s[12:13], s[8:9], s[12:13]
; GCN-IR-NEXT: s_and_b64 s[8:9], s[12:13], exec
; GCN-IR-NEXT: s_cselect_b32 s8, 0, 24
-; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[16:17]
+; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[14:15]
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[12:13]
; GCN-IR-NEXT: s_mov_b32 s9, 0
; GCN-IR-NEXT: s_cbranch_vccz .LBB10_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: s_add_u32 s12, s10, 1
-; GCN-IR-NEXT: s_addc_u32 s13, s11, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[12:13], 0
+; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
+; GCN-IR-NEXT: s_or_b32 s8, s8, s9
+; GCN-IR-NEXT: s_cmp_lg_u32 s8, 0
+; GCN-IR-NEXT: s_addc_u32 s8, s11, 0
+; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-IR-NEXT: s_sub_i32 s10, 63, s10
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[8:9]
; GCN-IR-NEXT: s_lshl_b64 s[8:9], 24, s10
; GCN-IR-NEXT: s_cbranch_vccz .LBB10_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
; GCN-IR-NEXT: s_lshr_b64 s[12:13], 24, s12
-; GCN-IR-NEXT: s_add_u32 s16, s2, -1
-; GCN-IR-NEXT: s_addc_u32 s17, s3, -1
-; GCN-IR-NEXT: s_sub_u32 s10, 58, s14
-; GCN-IR-NEXT: s_subb_u32 s11, 0, 0
-; GCN-IR-NEXT: s_mov_b64 s[14:15], 0
+; GCN-IR-NEXT: s_add_u32 s14, s2, -1
+; GCN-IR-NEXT: s_addc_u32 s15, s3, -1
+; GCN-IR-NEXT: s_sub_u32 s16, 58, s16
+; GCN-IR-NEXT: s_subb_u32 s17, 0, 0
+; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
; GCN-IR-NEXT: s_mov_b32 s7, 0
; GCN-IR-NEXT: .LBB10_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -1328,19 +1336,22 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_lshr_b32 s6, s9, 31
; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[8:9], 1
; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[6:7]
-; GCN-IR-NEXT: s_or_b64 s[8:9], s[14:15], s[8:9]
-; GCN-IR-NEXT: s_sub_u32 s6, s16, s12
-; GCN-IR-NEXT: s_subb_u32 s6, s17, s13
-; GCN-IR-NEXT: s_ashr_i32 s14, s6, 31
-; GCN-IR-NEXT: s_mov_b32 s15, s14
-; GCN-IR-NEXT: s_and_b32 s6, s14, 1
-; GCN-IR-NEXT: s_and_b64 s[14:15], s[14:15], s[2:3]
-; GCN-IR-NEXT: s_sub_u32 s12, s12, s14
-; GCN-IR-NEXT: s_subb_u32 s13, s13, s15
-; GCN-IR-NEXT: s_add_u32 s10, s10, 1
-; GCN-IR-NEXT: s_addc_u32 s11, s11, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[10:11], 0
-; GCN-IR-NEXT: s_mov_b64 s[14:15], s[6:7]
+; GCN-IR-NEXT: s_or_b64 s[8:9], s[10:11], s[8:9]
+; GCN-IR-NEXT: s_sub_u32 s6, s14, s12
+; GCN-IR-NEXT: s_subb_u32 s6, s15, s13
+; GCN-IR-NEXT: s_ashr_i32 s10, s6, 31
+; GCN-IR-NEXT: s_mov_b32 s11, s10
+; GCN-IR-NEXT: s_and_b32 s6, s10, 1
+; GCN-IR-NEXT: s_and_b64 s[18:19], s[10:11], s[2:3]
+; GCN-IR-NEXT: s_sub_u32 s12, s12, s18
+; GCN-IR-NEXT: s_subb_u32 s13, s13, s19
+; GCN-IR-NEXT: s_add_u32 s16, s16, 1
+; GCN-IR-NEXT: s_cselect_b64 s[18:19], -1, 0
+; GCN-IR-NEXT: s_or_b32 s18, s18, s19
+; GCN-IR-NEXT: s_cmp_lg_u32 s18, 0
+; GCN-IR-NEXT: s_addc_u32 s17, s17, 0
+; GCN-IR-NEXT: s_cselect_b64 s[18:19], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[10:11], s[6:7]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[18:19]
; GCN-IR-NEXT: s_cbranch_vccz .LBB10_3
; GCN-IR-NEXT: .LBB10_4: ; %Flow6
@@ -1472,17 +1483,17 @@ define i64 @v_test_sdiv_k_num_i64(i64 %x) {
; GCN-IR-LABEL: v_test_sdiv_k_num_i64:
; GCN-IR: ; %bb.0: ; %_udiv-special-cases
; GCN-IR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v10
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v10, vcc
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT: v_min_u32_e32 v8, v2, v3
; GCN-IR-NEXT: s_movk_i32 s6, 0xffc5
-; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v8
; GCN-IR-NEXT: v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1490,69 +1501,68 @@ define i64 @v_test_sdiv_k_num_i64(i64 %x) {
; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], vcc
; GCN-IR-NEXT: v_cndmask_b32_e64 v4, 24, 0, s[4:5]
; GCN-IR-NEXT: s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT: v_mov_b32_e32 v13, v12
+; GCN-IR-NEXT: v_mov_b32_e32 v11, v10
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
; GCN-IR-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; GCN-IR-NEXT: s_cbranch_execz .LBB11_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
; GCN-IR-NEXT: v_lshl_b64 v[2:3], 24, v2
+; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB11_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, -1, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, -1, v1, vcc
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], 24, v6
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 58, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, -1, v0
+; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v1, vcc
+; GCN-IR-NEXT: v_sub_i32_e32 v14, vcc, 58, v8
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], 24, v6
+; GCN-IR-NEXT: v_subb_u32_e64 v15, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
; GCN-IR-NEXT: .LBB11_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v14, v8
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v15, v9, vcc
-; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v12, v6
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v13, v7, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT: v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, 1, v14
+; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB11_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB11_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB11_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v1
; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v0
; GCN-IR-NEXT: .LBB11_6: ; %Flow5
; GCN-IR-NEXT: s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v4, v12
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v5, v13
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v4, v10
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v5, v11
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v11, vcc
; GCN-IR-NEXT: s_setpc_b64 s[30:31]
%result = sdiv i64 24, %x
ret i64 %result
@@ -1665,17 +1675,17 @@ define i64 @v_test_sdiv_pow2_k_num_i64(i64 %x) {
; GCN-IR-LABEL: v_test_sdiv_pow2_k_num_i64:
; GCN-IR: ; %bb.0: ; %_udiv-special-cases
; GCN-IR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v10
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v10, vcc
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT: v_min_u32_e32 v8, v2, v3
; GCN-IR-NEXT: s_movk_i32 s6, 0xffd0
-; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v8
; GCN-IR-NEXT: v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1684,70 +1694,69 @@ define i64 @v_test_sdiv_pow2_k_num_i64(i64 %x) {
; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], vcc
; GCN-IR-NEXT: v_cndmask_b32_e64 v4, v4, 0, s[4:5]
; GCN-IR-NEXT: s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT: v_mov_b32_e32 v13, v12
+; GCN-IR-NEXT: v_mov_b32_e32 v11, v10
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
; GCN-IR-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; GCN-IR-NEXT: s_cbranch_execz .LBB12_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-IR-NEXT: s_mov_b64 s[4:5], 0x8000
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0x8000
+; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[8:9], v2
; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[4:5], v2
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[10:11]
; GCN-IR-NEXT: s_cbranch_execz .LBB12_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, -1, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, -1, v1, vcc
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], s[4:5], v6
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 47, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, -1, v0
+; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v1, vcc
+; GCN-IR-NEXT: v_sub_i32_e32 v14, vcc, 47, v8
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[8:9], v6
+; GCN-IR-NEXT: v_subb_u32_e64 v15, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
; GCN-IR-NEXT: .LBB12_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v14, v8
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v15, v9, vcc
-; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v12, v6
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v13, v7, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT: v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, 1, v14
+; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB12_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB12_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB12_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v1
; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v0
; GCN-IR-NEXT: .LBB12_6: ; %Flow5
; GCN-IR-NEXT: s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v4, v12
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v5, v13
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v4, v10
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v5, v11
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v11, vcc
; GCN-IR-NEXT: s_setpc_b64 s[30:31]
%result = sdiv i64 32768, %x
ret i64 %result
@@ -1767,20 +1776,20 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
; GCN-IR-LABEL: v_test_sdiv_pow2_k_den_i64:
; GCN-IR: ; %bb.0: ; %_udiv-special-cases
; GCN-IR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v1
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v10
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v10
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v0, v10
-; GCN-IR-NEXT: v_subb_u32_e32 v5, vcc, v1, v10, vcc
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v1
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v8
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v8
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v0, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v5, vcc, v1, v8, vcc
; GCN-IR-NEXT: v_ffbh_u32_e32 v0, v4
; GCN-IR-NEXT: v_add_i32_e64 v0, s[4:5], 32, v0
; GCN-IR-NEXT: v_ffbh_u32_e32 v1, v5
-; GCN-IR-NEXT: v_min_u32_e32 v8, v0, v1
-; GCN-IR-NEXT: v_sub_i32_e64 v0, s[4:5], 48, v8
+; GCN-IR-NEXT: v_min_u32_e32 v6, v0, v1
+; GCN-IR-NEXT: v_sub_i32_e64 v0, s[4:5], 48, v6
; GCN-IR-NEXT: v_subb_u32_e64 v1, s[4:5], 0, 0, s[4:5]
; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5]
; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[4:5], 63, v[0:1]
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v10
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v8
; GCN-IR-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[0:1]
; GCN-IR-NEXT: s_xor_b64 s[6:7], s[4:5], -1
@@ -1790,61 +1799,60 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; GCN-IR-NEXT: s_cbranch_execz .LBB13_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v1, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v7, vcc, 1, v0
+; GCN-IR-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v0, s[4:5], 63, v0
-; GCN-IR-NEXT: v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[4:5], v0
+; GCN-IR-NEXT: v_mov_b32_e32 v2, 0
; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB13_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_lshr_b64 v[6:7], v[4:5], v6
-; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 0xffffffcf, v8
-; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT: v_addc_u32_e64 v5, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, 0xffffffcf, v6
+; GCN-IR-NEXT: v_lshr_b64 v[4:5], v[4:5], v7
+; GCN-IR-NEXT: v_addc_u32_e64 v11, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT: s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT: s_movk_i32 s10, 0x7fff
; GCN-IR-NEXT: .LBB13_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[4:5], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1
-; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2
-; GCN-IR-NEXT: v_sub_i32_e32 v2, vcc, s12, v6
+; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v2
; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 1, v4
-; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT: v_and_b32_e32 v8, 0x8000, v8
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5]
-; GCN-IR-NEXT: v_or_b32_e32 v1, v9, v1
-; GCN-IR-NEXT: v_sub_i32_e64 v6, s[4:5], v6, v8
-; GCN-IR-NEXT: v_mov_b32_e32 v9, v3
-; GCN-IR-NEXT: v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v2, vcc, s10, v4
+; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v0, v6, v0
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v6, 31, v2
+; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v6
+; GCN-IR-NEXT: v_and_b32_e32 v6, 0x8000, v6
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v4, v6
+; GCN-IR-NEXT: v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, 1, v10
+; GCN-IR-NEXT: v_or_b32_e32 v1, v7, v1
+; GCN-IR-NEXT: v_addc_u32_e32 v11, vcc, 0, v11, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v7, v3
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v6, v2
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB13_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB13_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB13_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
; GCN-IR-NEXT: v_or_b32_e32 v3, v3, v1
; GCN-IR-NEXT: v_or_b32_e32 v2, v2, v0
; GCN-IR-NEXT: .LBB13_6: ; %Flow5
; GCN-IR-NEXT: s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v2, v10
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v3, v11
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v10
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v11, vcc
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v2, v8
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v3, v9
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v9, vcc
; GCN-IR-NEXT: s_setpc_b64 s[30:31]
%result = sdiv i64 %x, 32768
ret i64 %result
diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index 465024a..33b0a5d 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -170,35 +170,38 @@ define amdgpu_kernel void @s_test_srem(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[2:3], 0
; GCN-IR-NEXT: s_flbit_i32_b64 s10, s[6:7]
-; GCN-IR-NEXT: s_flbit_i32_b64 s18, s[2:3]
+; GCN-IR-NEXT: s_flbit_i32_b64 s16, s[2:3]
; GCN-IR-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
-; GCN-IR-NEXT: s_sub_u32 s12, s10, s18
+; GCN-IR-NEXT: s_sub_u32 s12, s10, s16
; GCN-IR-NEXT: s_subb_u32 s13, 0, 0
; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[14:15], s[12:13], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[12:13], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[12:13], 63
; GCN-IR-NEXT: s_or_b64 s[14:15], s[8:9], s[14:15]
; GCN-IR-NEXT: s_and_b64 s[8:9], s[14:15], exec
; GCN-IR-NEXT: s_cselect_b32 s9, 0, s3
; GCN-IR-NEXT: s_cselect_b32 s8, 0, s2
-; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[16:17]
+; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[18:19]
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[14:15]
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: s_add_u32 s14, s12, 1
-; GCN-IR-NEXT: s_addc_u32 s15, s13, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
+; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
+; GCN-IR-NEXT: s_or_b32 s8, s8, s9
+; GCN-IR-NEXT: s_cmp_lg_u32 s8, 0
+; GCN-IR-NEXT: s_addc_u32 s8, s13, 0
+; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-IR-NEXT: s_sub_i32 s12, 63, s12
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[8:9]
; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[2:3], s12
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
; GCN-IR-NEXT: s_lshr_b64 s[12:13], s[2:3], s14
-; GCN-IR-NEXT: s_add_u32 s16, s6, -1
-; GCN-IR-NEXT: s_addc_u32 s17, s7, -1
+; GCN-IR-NEXT: s_add_u32 s14, s6, -1
+; GCN-IR-NEXT: s_addc_u32 s15, s7, -1
; GCN-IR-NEXT: s_not_b64 s[4:5], s[10:11]
-; GCN-IR-NEXT: s_add_u32 s10, s4, s18
-; GCN-IR-NEXT: s_addc_u32 s11, s5, 0
-; GCN-IR-NEXT: s_mov_b64 s[14:15], 0
+; GCN-IR-NEXT: s_add_u32 s16, s4, s16
+; GCN-IR-NEXT: s_addc_u32 s17, s5, 0
+; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
; GCN-IR-NEXT: s_mov_b32 s5, 0
; GCN-IR-NEXT: .LBB0_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -206,19 +209,22 @@ define amdgpu_kernel void @s_test_srem(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-IR-NEXT: s_lshr_b32 s4, s9, 31
; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[8:9], 1
; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[8:9], s[14:15], s[8:9]
-; GCN-IR-NEXT: s_sub_u32 s4, s16, s12
-; GCN-IR-NEXT: s_subb_u32 s4, s17, s13
-; GCN-IR-NEXT: s_ashr_i32 s14, s4, 31
-; GCN-IR-NEXT: s_mov_b32 s15, s14
-; GCN-IR-NEXT: s_and_b32 s4, s14, 1
-; GCN-IR-NEXT: s_and_b64 s[14:15], s[14:15], s[6:7]
-; GCN-IR-NEXT: s_sub_u32 s12, s12, s14
-; GCN-IR-NEXT: s_subb_u32 s13, s13, s15
-; GCN-IR-NEXT: s_add_u32 s10, s10, 1
-; GCN-IR-NEXT: s_addc_u32 s11, s11, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[10:11], 0
-; GCN-IR-NEXT: s_mov_b64 s[14:15], s[4:5]
+; GCN-IR-NEXT: s_or_b64 s[8:9], s[10:11], s[8:9]
+; GCN-IR-NEXT: s_sub_u32 s4, s14, s12
+; GCN-IR-NEXT: s_subb_u32 s4, s15, s13
+; GCN-IR-NEXT: s_ashr_i32 s10, s4, 31
+; GCN-IR-NEXT: s_mov_b32 s11, s10
+; GCN-IR-NEXT: s_and_b32 s4, s10, 1
+; GCN-IR-NEXT: s_and_b64 s[18:19], s[10:11], s[6:7]
+; GCN-IR-NEXT: s_sub_u32 s12, s12, s18
+; GCN-IR-NEXT: s_subb_u32 s13, s13, s19
+; GCN-IR-NEXT: s_add_u32 s16, s16, 1
+; GCN-IR-NEXT: s_cselect_b64 s[18:19], -1, 0
+; GCN-IR-NEXT: s_or_b32 s18, s18, s19
+; GCN-IR-NEXT: s_cmp_lg_u32 s18, 0
+; GCN-IR-NEXT: s_addc_u32 s17, s17, 0
+; GCN-IR-NEXT: s_cselect_b64 s[18:19], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[10:11], s[4:5]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[18:19]
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_3
; GCN-IR-NEXT: .LBB0_4: ; %Flow7
@@ -373,12 +379,12 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
; GCN-IR-LABEL: v_test_srem:
; GCN-IR: ; %bb.0: ; %_udiv-special-cases
; GCN-IR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v14, 31, v1
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v14
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v14
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v14
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v1
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v12
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v12
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12
; GCN-IR-NEXT: v_ashrrev_i32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v14, vcc
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v12, vcc
; GCN-IR-NEXT: v_xor_b32_e32 v2, v2, v4
; GCN-IR-NEXT: v_xor_b32_e32 v3, v3, v4
; GCN-IR-NEXT: v_sub_i32_e32 v2, vcc, v2, v4
@@ -386,12 +392,12 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v2
; GCN-IR-NEXT: v_add_i32_e64 v4, s[6:7], 32, v4
; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT: v_min_u32_e32 v12, v4, v5
+; GCN-IR-NEXT: v_min_u32_e32 v10, v4, v5
; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v0
; GCN-IR-NEXT: v_add_i32_e64 v4, s[6:7], 32, v4
; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT: v_min_u32_e32 v13, v4, v5
-; GCN-IR-NEXT: v_sub_i32_e64 v4, s[6:7], v12, v13
+; GCN-IR-NEXT: v_min_u32_e32 v11, v4, v5
+; GCN-IR-NEXT: v_sub_i32_e64 v4, s[6:7], v10, v11
; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
; GCN-IR-NEXT: v_subb_u32_e64 v5, s[6:7], 0, 0, s[6:7]
@@ -400,7 +406,7 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[4:5]
; GCN-IR-NEXT: s_xor_b64 s[6:7], s[4:5], -1
-; GCN-IR-NEXT: v_mov_b32_e32 v15, v14
+; GCN-IR-NEXT: v_mov_b32_e32 v13, v12
; GCN-IR-NEXT: v_cndmask_b32_e64 v7, v1, 0, s[4:5]
; GCN-IR-NEXT: v_cndmask_b32_e64 v6, v0, 0, s[4:5]
; GCN-IR-NEXT: s_and_b64 s[4:5], s[6:7], vcc
@@ -408,54 +414,53 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
; GCN-IR-NEXT: s_cbranch_execz .LBB1_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 63, v4
-; GCN-IR-NEXT: v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[0:1], v4
+; GCN-IR-NEXT: v_mov_b32_e32 v6, 0
; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB1_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_add_i32_e32 v16, vcc, -1, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v17, vcc, -1, v3, vcc
-; GCN-IR-NEXT: v_not_b32_e32 v6, v12
-; GCN-IR-NEXT: v_lshr_b64 v[10:11], v[0:1], v8
-; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, v6, v13
-; GCN-IR-NEXT: v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT: v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v13, 0
+; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, -1, v2
+; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, -1, v3, vcc
+; GCN-IR-NEXT: v_not_b32_e32 v6, v10
+; GCN-IR-NEXT: v_add_i32_e32 v16, vcc, v6, v11
+; GCN-IR-NEXT: v_lshr_b64 v[8:9], v[0:1], v8
+; GCN-IR-NEXT: v_addc_u32_e64 v17, s[8:9], -1, 0, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
; GCN-IR-NEXT: .LBB1_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[10:11], v[10:11], 1
+; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT: v_or_b32_e32 v10, v10, v6
+; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v6
; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v16, v10
-; GCN-IR-NEXT: v_subb_u32_e32 v6, vcc, v17, v11, vcc
-; GCN-IR-NEXT: v_or_b32_e32 v4, v12, v4
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v6
-; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v8
-; GCN-IR-NEXT: v_or_b32_e32 v5, v13, v5
-; GCN-IR-NEXT: v_and_b32_e32 v6, 1, v12
-; GCN-IR-NEXT: v_and_b32_e32 v13, v12, v3
-; GCN-IR-NEXT: v_and_b32_e32 v12, v12, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GCN-IR-NEXT: v_sub_i32_e64 v10, s[4:5], v10, v12
-; GCN-IR-NEXT: v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5]
-; GCN-IR-NEXT: v_mov_b32_e32 v13, v7
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v12, v6
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v14, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v6, vcc, v15, v9, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v4, v10, v4
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v6
+; GCN-IR-NEXT: v_or_b32_e32 v5, v11, v5
+; GCN-IR-NEXT: v_and_b32_e32 v6, 1, v10
+; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v3
+; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v2
+; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, v8, v10
+; GCN-IR-NEXT: v_subb_u32_e32 v9, vcc, v9, v11, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v16, vcc, 1, v16
+; GCN-IR-NEXT: v_addc_u32_e32 v17, vcc, 0, v17, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v11, v7
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v10, v6
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB1_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB1_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB1_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[4:5], 1
; GCN-IR-NEXT: v_or_b32_e32 v7, v7, v5
; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
@@ -469,10 +474,10 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, v4, v3
; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v14
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v15
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v14
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v15, vcc
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v12
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v13
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v13, vcc
; GCN-IR-NEXT: s_setpc_b64 s[30:31]
%result = srem i64 %x, %y
ret i64 %result
@@ -1148,35 +1153,38 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[2:3], s[8:9], 0
; GCN-IR-NEXT: s_flbit_i32_b64 s12, s[8:9]
; GCN-IR-NEXT: s_or_b64 s[10:11], s[2:3], s[10:11]
-; GCN-IR-NEXT: s_flbit_i32_b64 s20, s[6:7]
-; GCN-IR-NEXT: s_sub_u32 s14, s12, s20
+; GCN-IR-NEXT: s_flbit_i32_b64 s18, s[6:7]
+; GCN-IR-NEXT: s_sub_u32 s14, s12, s18
; GCN-IR-NEXT: s_subb_u32 s15, 0, 0
; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[16:17], s[14:15], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[14:15], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[20:21], s[14:15], 63
; GCN-IR-NEXT: s_or_b64 s[16:17], s[10:11], s[16:17]
; GCN-IR-NEXT: s_and_b64 s[10:11], s[16:17], exec
; GCN-IR-NEXT: s_cselect_b32 s11, 0, s7
; GCN-IR-NEXT: s_cselect_b32 s10, 0, s6
-; GCN-IR-NEXT: s_or_b64 s[16:17], s[16:17], s[18:19]
+; GCN-IR-NEXT: s_or_b64 s[16:17], s[16:17], s[20:21]
; GCN-IR-NEXT: s_mov_b64 s[2:3], 0
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[16:17]
; GCN-IR-NEXT: s_cbranch_vccz .LBB8_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: s_add_u32 s16, s14, 1
-; GCN-IR-NEXT: s_addc_u32 s17, s15, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[16:17], 0
+; GCN-IR-NEXT: s_cselect_b64 s[10:11], -1, 0
+; GCN-IR-NEXT: s_or_b32 s10, s10, s11
+; GCN-IR-NEXT: s_cmp_lg_u32 s10, 0
+; GCN-IR-NEXT: s_addc_u32 s10, s15, 0
+; GCN-IR-NEXT: s_cselect_b64 s[10:11], -1, 0
; GCN-IR-NEXT: s_sub_i32 s14, 63, s14
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11]
; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[6:7], s14
; GCN-IR-NEXT: s_cbranch_vccz .LBB8_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
; GCN-IR-NEXT: s_lshr_b64 s[14:15], s[6:7], s16
-; GCN-IR-NEXT: s_add_u32 s18, s8, -1
-; GCN-IR-NEXT: s_addc_u32 s19, s9, -1
+; GCN-IR-NEXT: s_add_u32 s16, s8, -1
+; GCN-IR-NEXT: s_addc_u32 s17, s9, -1
; GCN-IR-NEXT: s_not_b64 s[2:3], s[12:13]
-; GCN-IR-NEXT: s_add_u32 s12, s2, s20
-; GCN-IR-NEXT: s_addc_u32 s13, s3, 0
-; GCN-IR-NEXT: s_mov_b64 s[16:17], 0
+; GCN-IR-NEXT: s_add_u32 s18, s2, s18
+; GCN-IR-NEXT: s_addc_u32 s19, s3, 0
+; GCN-IR-NEXT: s_mov_b64 s[12:13], 0
; GCN-IR-NEXT: s_mov_b32 s3, 0
; GCN-IR-NEXT: .LBB8_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -1184,19 +1192,22 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
; GCN-IR-NEXT: s_lshr_b32 s2, s11, 31
; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[10:11], 1
; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[2:3]
-; GCN-IR-NEXT: s_or_b64 s[10:11], s[16:17], s[10:11]
-; GCN-IR-NEXT: s_sub_u32 s2, s18, s14
-; GCN-IR-NEXT: s_subb_u32 s2, s19, s15
-; GCN-IR-NEXT: s_ashr_i32 s16, s2, 31
-; GCN-IR-NEXT: s_mov_b32 s17, s16
-; GCN-IR-NEXT: s_and_b32 s2, s16, 1
-; GCN-IR-NEXT: s_and_b64 s[16:17], s[16:17], s[8:9]
-; GCN-IR-NEXT: s_sub_u32 s14, s14, s16
-; GCN-IR-NEXT: s_subb_u32 s15, s15, s17
-; GCN-IR-NEXT: s_add_u32 s12, s12, 1
-; GCN-IR-NEXT: s_addc_u32 s13, s13, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[20:21], s[12:13], 0
-; GCN-IR-NEXT: s_mov_b64 s[16:17], s[2:3]
+; GCN-IR-NEXT: s_or_b64 s[10:11], s[12:13], s[10:11]
+; GCN-IR-NEXT: s_sub_u32 s2, s16, s14
+; GCN-IR-NEXT: s_subb_u32 s2, s17, s15
+; GCN-IR-NEXT: s_ashr_i32 s12, s2, 31
+; GCN-IR-NEXT: s_mov_b32 s13, s12
+; GCN-IR-NEXT: s_and_b32 s2, s12, 1
+; GCN-IR-NEXT: s_and_b64 s[20:21], s[12:13], s[8:9]
+; GCN-IR-NEXT: s_sub_u32 s14, s14, s20
+; GCN-IR-NEXT: s_subb_u32 s15, s15, s21
+; GCN-IR-NEXT: s_add_u32 s18, s18, 1
+; GCN-IR-NEXT: s_cselect_b64 s[20:21], -1, 0
+; GCN-IR-NEXT: s_or_b32 s20, s20, s21
+; GCN-IR-NEXT: s_cmp_lg_u32 s20, 0
+; GCN-IR-NEXT: s_addc_u32 s19, s19, 0
+; GCN-IR-NEXT: s_cselect_b64 s[20:21], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[12:13], s[2:3]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[20:21]
; GCN-IR-NEXT: s_cbranch_vccz .LBB8_3
; GCN-IR-NEXT: .LBB8_4: ; %Flow7
@@ -1461,34 +1472,37 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_xor_b64 s[2:3], s[2:3], s[8:9]
; GCN-IR-NEXT: s_sub_u32 s4, s2, s8
; GCN-IR-NEXT: s_subb_u32 s5, s3, s8
-; GCN-IR-NEXT: s_flbit_i32_b64 s12, s[4:5]
-; GCN-IR-NEXT: s_add_u32 s2, s12, 0xffffffc5
+; GCN-IR-NEXT: s_flbit_i32_b64 s14, s[4:5]
+; GCN-IR-NEXT: s_add_u32 s2, s14, 0xffffffc5
; GCN-IR-NEXT: s_addc_u32 s3, 0, -1
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[4:5], 0
; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[10:11], s[2:3], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[14:15], s[2:3], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[2:3], 63
; GCN-IR-NEXT: s_or_b64 s[10:11], s[8:9], s[10:11]
; GCN-IR-NEXT: s_and_b64 s[8:9], s[10:11], exec
; GCN-IR-NEXT: s_cselect_b32 s8, 0, 24
-; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[14:15]
+; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[12:13]
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11]
; GCN-IR-NEXT: s_mov_b32 s9, 0
; GCN-IR-NEXT: s_cbranch_vccz .LBB10_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: s_add_u32 s8, s2, 1
-; GCN-IR-NEXT: s_addc_u32 s9, s3, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[8:9], 0
+; GCN-IR-NEXT: s_cselect_b64 s[10:11], -1, 0
+; GCN-IR-NEXT: s_or_b32 s9, s10, s11
+; GCN-IR-NEXT: s_cmp_lg_u32 s9, 0
+; GCN-IR-NEXT: s_addc_u32 s3, s3, 0
+; GCN-IR-NEXT: s_cselect_b64 s[10:11], -1, 0
; GCN-IR-NEXT: s_sub_i32 s2, 63, s2
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11]
; GCN-IR-NEXT: s_lshl_b64 s[2:3], 24, s2
; GCN-IR-NEXT: s_cbranch_vccz .LBB10_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
; GCN-IR-NEXT: s_lshr_b64 s[10:11], 24, s8
-; GCN-IR-NEXT: s_add_u32 s14, s4, -1
-; GCN-IR-NEXT: s_addc_u32 s15, s5, -1
-; GCN-IR-NEXT: s_sub_u32 s8, 58, s12
-; GCN-IR-NEXT: s_subb_u32 s9, 0, 0
-; GCN-IR-NEXT: s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT: s_add_u32 s12, s4, -1
+; GCN-IR-NEXT: s_addc_u32 s13, s5, -1
+; GCN-IR-NEXT: s_sub_u32 s14, 58, s14
+; GCN-IR-NEXT: s_subb_u32 s15, 0, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
; GCN-IR-NEXT: s_mov_b32 s7, 0
; GCN-IR-NEXT: .LBB10_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -1496,19 +1510,22 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_lshr_b32 s6, s3, 31
; GCN-IR-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[6:7]
-; GCN-IR-NEXT: s_or_b64 s[2:3], s[12:13], s[2:3]
-; GCN-IR-NEXT: s_sub_u32 s6, s14, s10
-; GCN-IR-NEXT: s_subb_u32 s6, s15, s11
-; GCN-IR-NEXT: s_ashr_i32 s12, s6, 31
-; GCN-IR-NEXT: s_mov_b32 s13, s12
-; GCN-IR-NEXT: s_and_b32 s6, s12, 1
-; GCN-IR-NEXT: s_and_b64 s[12:13], s[12:13], s[4:5]
-; GCN-IR-NEXT: s_sub_u32 s10, s10, s12
-; GCN-IR-NEXT: s_subb_u32 s11, s11, s13
-; GCN-IR-NEXT: s_add_u32 s8, s8, 1
-; GCN-IR-NEXT: s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[8:9], 0
-; GCN-IR-NEXT: s_mov_b64 s[12:13], s[6:7]
+; GCN-IR-NEXT: s_or_b64 s[2:3], s[8:9], s[2:3]
+; GCN-IR-NEXT: s_sub_u32 s6, s12, s10
+; GCN-IR-NEXT: s_subb_u32 s6, s13, s11
+; GCN-IR-NEXT: s_ashr_i32 s8, s6, 31
+; GCN-IR-NEXT: s_mov_b32 s9, s8
+; GCN-IR-NEXT: s_and_b32 s6, s8, 1
+; GCN-IR-NEXT: s_and_b64 s[16:17], s[8:9], s[4:5]
+; GCN-IR-NEXT: s_sub_u32 s10, s10, s16
+; GCN-IR-NEXT: s_subb_u32 s11, s11, s17
+; GCN-IR-NEXT: s_add_u32 s14, s14, 1
+; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT: s_or_b32 s16, s16, s17
+; GCN-IR-NEXT: s_cmp_lg_u32 s16, 0
+; GCN-IR-NEXT: s_addc_u32 s15, s15, 0
+; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], s[6:7]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[16:17]
; GCN-IR-NEXT: s_cbranch_vccz .LBB10_3
; GCN-IR-NEXT: .LBB10_4: ; %Flow6
@@ -1647,9 +1664,9 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT: v_min_u32_e32 v8, v2, v3
; GCN-IR-NEXT: s_movk_i32 s6, 0xffc5
-; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v8
; GCN-IR-NEXT: v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1663,53 +1680,52 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
; GCN-IR-NEXT: s_cbranch_execz .LBB11_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
; GCN-IR-NEXT: v_lshl_b64 v[2:3], 24, v2
+; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB11_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], 24, v6
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 58, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT: v_sub_i32_e32 v12, vcc, 58, v8
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], 24, v6
+; GCN-IR-NEXT: v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
; GCN-IR-NEXT: .LBB11_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v12, v8
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v13, v9, vcc
-; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v10, v6
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v11, v7, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT: v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB11_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB11_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB11_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v3
; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v2
@@ -1838,9 +1854,9 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT: v_min_u32_e32 v8, v2, v3
; GCN-IR-NEXT: s_movk_i32 s6, 0xffd0
-; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v8
; GCN-IR-NEXT: v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1855,54 +1871,53 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
; GCN-IR-NEXT: s_cbranch_execz .LBB12_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-IR-NEXT: s_mov_b64 s[4:5], 0x8000
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0x8000
+; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[8:9], v2
; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[4:5], v2
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[10:11]
; GCN-IR-NEXT: s_cbranch_execz .LBB12_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], s[4:5], v6
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 47, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT: v_sub_i32_e32 v12, vcc, 47, v8
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[8:9], v6
+; GCN-IR-NEXT: v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
; GCN-IR-NEXT: .LBB12_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v12, v8
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v13, v9, vcc
-; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v10, v6
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v11, v7, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT: v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB12_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB12_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB12_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v3
; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v2
@@ -1937,20 +1952,20 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
; GCN-IR-LABEL: v_test_srem_pow2_k_den_i64:
; GCN-IR: ; %bb.0: ; %_udiv-special-cases
; GCN-IR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v10
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v10, vcc
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e64 v2, s[4:5], 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 48, v10
+; GCN-IR-NEXT: v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 48, v8
; GCN-IR-NEXT: v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[4:5], 63, v[2:3]
-; GCN-IR-NEXT: v_mov_b32_e32 v13, v12
+; GCN-IR-NEXT: v_mov_b32_e32 v11, v10
; GCN-IR-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[2:3]
; GCN-IR-NEXT: s_xor_b64 s[6:7], s[4:5], -1
@@ -1961,51 +1976,50 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
; GCN-IR-NEXT: s_cbranch_execz .LBB13_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB13_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], v[0:1], v6
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 0xffffffcf, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT: v_addc_u32_e64 v7, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, 0xffffffcf, v8
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], v[0:1], v6
+; GCN-IR-NEXT: v_addc_u32_e64 v13, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT: s_movk_i32 s10, 0x7fff
; GCN-IR-NEXT: .LBB13_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, s12, v8
+; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, 0, v9, vcc
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT: v_and_b32_e32 v10, 0x8000, v10
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT: v_subbrev_u32_e64 v9, s[4:5], 0, v9, s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, s10, v6
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v8, 0x8000, v8
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT: v_subbrev_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB13_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB13_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB13_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v3
; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v2
@@ -2014,10 +2028,10 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[4:5], 15
; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v13
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v11
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v11, vcc
; GCN-IR-NEXT: s_setpc_b64 s[30:31]
%result = srem i64 %x, 32768
ret i64 %result
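
The hunks above are register renaming and exec-mask cleanup in the GCN-IR expansion of a signed remainder by a power of two; the algebra being verified is unchanged. As a rough reference (a hand-written sketch, not code from the test file), the expansion takes the remainder on the magnitude of %x and then reapplies its sign:

define i64 @srem_pow2_sketch(i64 %x) {
  %sign = ashr i64 %x, 63        ; all-ones if %x is negative, else zero
  %t = xor i64 %x, %sign
  %abs = sub i64 %t, %sign       ; |x|, via the xor/sub negation idiom
  %r = urem i64 %abs, 32768      ; unsigned remainder on the magnitude
  %rt = xor i64 %r, %sign
  %res = sub i64 %rt, %sign      ; reapply the sign of %x
  ret i64 %res
}

The v_ashrrev_i32/v_xor/v_sub runs at the top and bottom of the checked code are the two sign-fix steps of this sketch.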
diff --git a/llvm/test/CodeGen/AMDGPU/uaddo.ll b/llvm/test/CodeGen/AMDGPU/uaddo.ll
index e1574dc..bb5918b2 100644
--- a/llvm/test/CodeGen/AMDGPU/uaddo.ll
+++ b/llvm/test/CodeGen/AMDGPU/uaddo.ll
@@ -14,15 +14,16 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_add_u32 s0, s2, s8
-; SI-NEXT: v_mov_b32_e32 v0, s2
+; SI-NEXT: s_add_u32 s2, s2, s8
; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_addc_u32 s1, s3, s9
+; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; SI-NEXT: s_or_b32 s0, s0, s1
+; SI-NEXT: s_cmp_lg_u32 s0, 0
+; SI-NEXT: s_addc_u32 s3, s3, s9
+; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; SI-NEXT: v_mov_b32_e32 v1, s3
-; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
-; SI-NEXT: v_mov_b32_e32 v1, s1
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_endpgm
@@ -33,15 +34,15 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: s_add_u32 s0, s2, s4
-; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: s_add_u32 s2, s2, s4
; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT: s_cmp_lg_u64 s[0:1], 0
+; VI-NEXT: s_addc_u32 s3, s3, s5
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
; VI-NEXT: v_mov_b32_e32 v3, s3
-; VI-NEXT: s_addc_u32 s1, s3, s5
-; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
-; VI-NEXT: v_mov_b32_e32 v3, s1
-; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT: v_add_u32_e32 v2, vcc, s2, v2
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
@@ -52,14 +53,14 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v0, s2
-; GFX9-NEXT: s_add_u32 s4, s2, s6
-; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: s_addc_u32 s5, s3, s7
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v1, s5
-; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s4, v0
+; GFX9-NEXT: s_add_u32 s6, s2, s6
+; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0
+; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0
+; GFX9-NEXT: s_addc_u32 s4, s3, s7
+; GFX9-NEXT: s_cselect_b64 s[2:3], -1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s6, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX9-NEXT: s_endpgm
@@ -71,12 +72,14 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; GFX10-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: s_add_u32 s4, s2, s6
-; GFX10-NEXT: s_addc_u32 s5, s3, s7
-; GFX10-NEXT: v_cmp_lt_u64_e64 s2, s[4:5], s[2:3]
-; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX10-NEXT: v_add_co_u32 v0, s2, s4, v0
-; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s2, s5, 0, s2
+; GFX10-NEXT: s_add_u32 s2, s2, s6
+; GFX10-NEXT: s_cselect_b32 s4, -1, 0
+; GFX10-NEXT: s_cmp_lg_u32 s4, 0
+; GFX10-NEXT: s_addc_u32 s3, s3, s7
+; GFX10-NEXT: s_cselect_b32 s4, -1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s4
+; GFX10-NEXT: v_add_co_u32 v0, s2, s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s2, s3, 0, s2
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -87,14 +90,16 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; GFX11-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-NEXT: v_mov_b32_e32 v2, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_add_u32 s4, s2, s4
-; GFX11-NEXT: s_addc_u32 s5, s3, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cmp_lt_u64_e64 s2, s[4:5], s[2:3]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
+; GFX11-NEXT: s_add_u32 s2, s2, s4
+; GFX11-NEXT: s_cselect_b32 s4, -1, 0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: s_cmp_lg_u32 s4, 0
+; GFX11-NEXT: s_addc_u32 s3, s3, s5
+; GFX11-NEXT: s_cselect_b32 s4, -1, 0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v0, s2, s4, v0
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s5, 0, s2
+; GFX11-NEXT: v_add_co_u32 v0, s2, s2, v0
+; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s3, 0, s2
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_endpgm
%uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
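
Only the first IR line of s_uaddo_i64_zext is visible in the hunk context. A plausible completion, reconstructed from the generated code (value names here are hypothetical), zero-extends the carry and folds it back into the sum before the store, which is why the new checks materialize the carry from the scalar add (s_add_u32/s_addc_u32 plus s_cselect_b64) rather than recomputing it with v_cmp_lt_u64:

declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64)

define amdgpu_kernel void @uaddo_zext_sketch(ptr addrspace(1) %out, i64 %a, i64 %b) {
  %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
  %val = extractvalue { i64, i1 } %uadd, 0
  %carry = extractvalue { i64, i1 } %uadd, 1
  %carry.ext = zext i1 %carry to i64
  %sum = add i64 %val, %carry.ext              ; fold the carry into the sum
  store i64 %sum, ptr addrspace(1) %out
  ret void
}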
@@ -436,21 +441,23 @@ define amdgpu_kernel void @s_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_add_u32 s6, s4, s6
-; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: s_addc_u32 s7, s5, s7
-; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; SI-NEXT: v_mov_b32_e32 v2, s6
+; SI-NEXT: s_add_u32 s4, s4, s6
+; SI-NEXT: s_cselect_b64 s[12:13], -1, 0
+; SI-NEXT: s_or_b32 s6, s12, s13
+; SI-NEXT: s_cmp_lg_u32 s6, 0
+; SI-NEXT: s_addc_u32 s5, s5, s7
; SI-NEXT: s_mov_b32 s8, s0
; SI-NEXT: s_mov_b32 s9, s1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_mov_b32_e32 v1, s5
+; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
; SI-NEXT: s_mov_b32 s0, s2
; SI-NEXT: s_mov_b32 s1, s3
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
-; SI-NEXT: v_mov_b32_e32 v3, s7
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-NEXT: buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; SI-NEXT: buffer_store_byte v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
@@ -458,37 +465,37 @@ define amdgpu_kernel void @s_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: s_add_u32 s2, s4, s6
; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: s_add_u32 s0, s4, s6
-; VI-NEXT: v_mov_b32_e32 v4, s4
; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: s_addc_u32 s1, s5, s7
-; VI-NEXT: v_mov_b32_e32 v5, s5
-; VI-NEXT: v_mov_b32_e32 v7, s1
-; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[4:5]
-; VI-NEXT: v_mov_b32_e32 v6, s0
-; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT: s_cmp_lg_u64 s[0:1], 0
+; VI-NEXT: s_addc_u32 s0, s5, s7
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_mov_b32_e32 v5, s0
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
; VI-NEXT: v_mov_b32_e32 v3, s3
-; VI-NEXT: flat_store_dwordx2 v[0:1], v[6:7]
-; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; VI-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
+; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; VI-NEXT: flat_store_byte v[2:3], v0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: s_uaddo_i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_add_u32 s0, s12, s14
-; GFX9-NEXT: v_mov_b32_e32 v0, s12
-; GFX9-NEXT: v_mov_b32_e32 v1, s13
-; GFX9-NEXT: s_addc_u32 s1, s13, s15
-; GFX9-NEXT: v_mov_b32_e32 v3, s1
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v2, s0
-; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT: global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX9-NEXT: global_store_byte v4, v0, s[10:11]
+; GFX9-NEXT: s_add_u32 s2, s12, s14
+; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX9-NEXT: s_addc_u32 s0, s13, s15
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[0:1]
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX9-NEXT: global_store_byte v2, v3, s[10:11]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: s_uaddo_i64:
@@ -497,10 +504,12 @@ define amdgpu_kernel void @s_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_u32 s0, s12, s14
-; GFX10-NEXT: s_addc_u32 s1, s13, s15
+; GFX10-NEXT: s_cselect_b32 s1, -1, 0
; GFX10-NEXT: v_mov_b32_e32 v0, s0
+; GFX10-NEXT: s_cmp_lg_u32 s1, 0
+; GFX10-NEXT: s_addc_u32 s1, s13, s15
+; GFX10-NEXT: s_cselect_b32 s0, -1, 0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
-; GFX10-NEXT: v_cmp_lt_u64_e64 s0, s[0:1], s[12:13]
; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
; GFX10-NEXT: global_store_byte v2, v3, s[10:11]
@@ -510,12 +519,13 @@ define amdgpu_kernel void @s_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_add_u32 s6, s4, s6
-; GFX11-NEXT: s_addc_u32 s7, s5, s7
-; GFX11-NEXT: v_mov_b32_e32 v0, s6
-; GFX11-NEXT: v_cmp_lt_u64_e64 s4, s[6:7], s[4:5]
-; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: s_add_u32 s4, s4, s6
+; GFX11-NEXT: s_cselect_b32 s6, -1, 0
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
+; GFX11-NEXT: s_cmp_lg_u32 s6, 0
+; GFX11-NEXT: s_addc_u32 s5, s5, s7
+; GFX11-NEXT: s_cselect_b32 s4, -1, 0
+; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -551,10 +561,10 @@ define amdgpu_kernel void @v_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v2, vcc, v0, v2
-; SI-NEXT: v_addc_u32_e32 v3, vcc, v1, v3, vcc
-; SI-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; SI-NEXT: buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
@@ -574,10 +584,9 @@ define amdgpu_kernel void @v_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; VI-NEXT: v_mov_b32_e32 v6, s2
; VI-NEXT: v_mov_b32_e32 v7, s3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_add_u32_e32 v2, vcc, v0, v2
-; VI-NEXT: v_addc_u32_e32 v3, vcc, v1, v3, vcc
-; VI-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; VI-NEXT: flat_store_dwordx2 v[4:5], v[2:3]
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; VI-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; VI-NEXT: flat_store_byte v[6:7], v0
; VI-NEXT: s_endpgm
@@ -590,10 +599,9 @@ define amdgpu_kernel void @v_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX9-NEXT: global_load_dwordx2 v[0:1], v4, s[12:13]
; GFX9-NEXT: global_load_dwordx2 v[2:3], v4, s[14:15]
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v0, v2
-; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX9-NEXT: global_store_dwordx2 v4, v[2:3], s[8:9]
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: global_store_dwordx2 v4, v[0:1], s[8:9]
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; GFX9-NEXT: global_store_byte v4, v0, s[10:11]
; GFX9-NEXT: s_endpgm
@@ -607,12 +615,11 @@ define amdgpu_kernel void @v_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX10-NEXT: global_load_dwordx2 v[0:1], v4, s[12:13]
; GFX10-NEXT: global_load_dwordx2 v[2:3], v4, s[14:15]
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, v0, v2
-; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
-; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX10-NEXT: global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX10-NEXT: global_store_byte v4, v0, s[10:11]
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX10-NEXT: global_store_dwordx2 v4, v[0:1], s[8:9]
+; GFX10-NEXT: global_store_byte v4, v2, s[10:11]
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: v_uaddo_i64:
@@ -624,14 +631,12 @@ define amdgpu_kernel void @v_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX11-NEXT: global_load_b64 v[0:1], v4, s[4:5]
; GFX11-NEXT: global_load_b64 v[2:3], v4, s[6:7]
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
-; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: global_store_b64 v4, v[2:3], s[0:1]
-; GFX11-NEXT: global_store_b8 v4, v0, s[2:3]
+; GFX11-NEXT: global_store_b64 v4, v[0:1], s[0:1]
+; GFX11-NEXT: global_store_b8 v4, v2, s[2:3]
; GFX11-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
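
Likewise, only the first two IR lines of v_uaddo_i64 appear above. A hedged reconstruction of the per-lane pattern (hypothetical names; the in-tree test may differ in detail): each lane loads its own operands, adds them, and stores the 64-bit sum and the carry bit separately, matching the dwordx2 store plus byte store in the checks:

declare i32 @llvm.amdgcn.workitem.id.x()
declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64)

define amdgpu_kernel void @v_uaddo_sketch(ptr addrspace(1) %out, ptr addrspace(1) %carryout,
                                          ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i64, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i64, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %a = load i64, ptr addrspace(1) %a.gep
  %b = load i64, ptr addrspace(1) %b.gep
  %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
  %val = extractvalue { i64, i1 } %uadd, 0
  %carry = extractvalue { i64, i1 } %uadd, 1
  store i64 %val, ptr addrspace(1) %out        ; 64-bit sum (dwordx2 store)
  store i1 %carry, ptr addrspace(1) %carryout  ; overflow flag (byte store)
  ret void
}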
diff --git a/llvm/test/CodeGen/AMDGPU/uaddsat.ll b/llvm/test/CodeGen/AMDGPU/uaddsat.ll
index 9230174..7f89581 100644
--- a/llvm/test/CodeGen/AMDGPU/uaddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/uaddsat.ll
@@ -693,52 +693,47 @@ define i64 @v_uaddsat_i64(i64 %lhs, i64 %rhs) {
; GFX6-LABEL: v_uaddsat_i64:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, v0, v2
-; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v1, v3, vcc
-; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX6-NEXT: v_cndmask_b32_e64 v0, v2, -1, vcc
-; GFX6-NEXT: v_cndmask_b32_e64 v1, v3, -1, vcc
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_uaddsat_i64:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v2, vcc, v0, v2
-; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v1, v3, vcc
-; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX8-NEXT: v_cndmask_b32_e64 v0, v2, -1, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, -1, vcc
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_uaddsat_i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v0, v2
-; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, -1, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v1, v3, -1, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_uaddsat_i64:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, v0, v2
-; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
-; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, -1, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v1, v3, -1, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc_lo
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_uaddsat_i64:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
-; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, -1, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v1, v3, -1, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc_lo
; GFX11-NEXT: s_setpc_b64 s[30:31]
%result = call i64 @llvm.uadd.sat.i64(i64 %lhs, i64 %rhs)
ret i64 %result
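
For reference, @llvm.uadd.sat.i64 clamps to all-ones on unsigned overflow, and overflow of an unsigned add is detectable as sum < lhs. A minimal semantic sketch (not taken from the test file) shows why the explicit v_cmp_lt_u64 could be dropped: the carry written by v_addc already leaves the same condition in vcc:

define i64 @uaddsat_sketch(i64 %lhs, i64 %rhs) {
  %sum = add i64 %lhs, %rhs
  %ovf = icmp ult i64 %sum, %lhs          ; unsigned overflow iff the sum wrapped
  %res = select i1 %ovf, i64 -1, i64 %sum ; clamp to 0xFFFF...FFFF
  ret i64 %res
}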
diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index 1ed04f8..41199b0 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -146,8 +146,11 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: s_add_u32 s14, s12, 1
-; GCN-IR-NEXT: s_addc_u32 s15, s13, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
+; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
+; GCN-IR-NEXT: s_or_b32 s8, s8, s9
+; GCN-IR-NEXT: s_cmp_lg_u32 s8, 0
+; GCN-IR-NEXT: s_addc_u32 s8, s13, 0
+; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-IR-NEXT: s_sub_i32 s12, 63, s12
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[8:9]
; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[2:3], s12
@@ -157,9 +160,9 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
; GCN-IR-NEXT: s_add_u32 s14, s6, -1
; GCN-IR-NEXT: s_addc_u32 s15, s7, -1
; GCN-IR-NEXT: s_not_b64 s[2:3], s[10:11]
-; GCN-IR-NEXT: s_add_u32 s2, s2, s16
-; GCN-IR-NEXT: s_addc_u32 s3, s3, 0
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT: s_add_u32 s10, s2, s16
+; GCN-IR-NEXT: s_addc_u32 s11, s3, 0
+; GCN-IR-NEXT: s_mov_b64 s[2:3], 0
; GCN-IR-NEXT: s_mov_b32 s5, 0
; GCN-IR-NEXT: .LBB0_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -167,19 +170,22 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
; GCN-IR-NEXT: s_lshr_b32 s4, s9, 31
; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[8:9], 1
; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[8:9], s[10:11], s[8:9]
-; GCN-IR-NEXT: s_sub_u32 s4, s14, s12
-; GCN-IR-NEXT: s_subb_u32 s4, s15, s13
-; GCN-IR-NEXT: s_ashr_i32 s10, s4, 31
-; GCN-IR-NEXT: s_mov_b32 s11, s10
-; GCN-IR-NEXT: s_and_b32 s4, s10, 1
-; GCN-IR-NEXT: s_and_b64 s[10:11], s[10:11], s[6:7]
-; GCN-IR-NEXT: s_sub_u32 s12, s12, s10
-; GCN-IR-NEXT: s_subb_u32 s13, s13, s11
-; GCN-IR-NEXT: s_add_u32 s2, s2, 1
-; GCN-IR-NEXT: s_addc_u32 s3, s3, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[2:3], 0
-; GCN-IR-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT: s_or_b64 s[8:9], s[2:3], s[8:9]
+; GCN-IR-NEXT: s_sub_u32 s2, s14, s12
+; GCN-IR-NEXT: s_subb_u32 s2, s15, s13
+; GCN-IR-NEXT: s_ashr_i32 s2, s2, 31
+; GCN-IR-NEXT: s_mov_b32 s3, s2
+; GCN-IR-NEXT: s_and_b32 s4, s2, 1
+; GCN-IR-NEXT: s_and_b64 s[16:17], s[2:3], s[6:7]
+; GCN-IR-NEXT: s_sub_u32 s12, s12, s16
+; GCN-IR-NEXT: s_subb_u32 s13, s13, s17
+; GCN-IR-NEXT: s_add_u32 s10, s10, 1
+; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT: s_or_b32 s16, s16, s17
+; GCN-IR-NEXT: s_cmp_lg_u32 s16, 0
+; GCN-IR-NEXT: s_addc_u32 s11, s11, 0
+; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[2:3], s[4:5]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[16:17]
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_3
; GCN-IR-NEXT: .LBB0_4: ; %Flow7
@@ -313,19 +319,19 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v2
; GCN-IR-NEXT: v_add_i32_e64 v4, s[6:7], 32, v4
; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT: v_min_u32_e32 v14, v4, v5
+; GCN-IR-NEXT: v_min_u32_e32 v8, v4, v5
; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v0
; GCN-IR-NEXT: v_add_i32_e64 v4, s[6:7], 32, v4
; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT: v_min_u32_e32 v15, v4, v5
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[6:7], v14, v15
+; GCN-IR-NEXT: v_min_u32_e32 v9, v4, v5
+; GCN-IR-NEXT: v_sub_i32_e64 v6, s[6:7], v8, v9
; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT: v_subb_u32_e64 v9, s[6:7], 0, 0, s[6:7]
-; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[6:7], 63, v[8:9]
+; GCN-IR-NEXT: v_subb_u32_e64 v7, s[6:7], 0, 0, s[6:7]
+; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[6:7], 63, v[6:7]
; GCN-IR-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[8:9]
+; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[6:7]
; GCN-IR-NEXT: s_xor_b64 s[6:7], s[4:5], -1
; GCN-IR-NEXT: v_cndmask_b32_e64 v4, v1, 0, s[4:5]
; GCN-IR-NEXT: v_cndmask_b32_e64 v5, v0, 0, s[4:5]
@@ -333,55 +339,54 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; GCN-IR-NEXT: s_cbranch_execz .LBB1_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, 1, v8
-; GCN-IR-NEXT: v_addc_u32_e32 v11, vcc, 0, v9, vcc
-; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 63, v8
-; GCN-IR-NEXT: v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11]
+; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, 1, v6
+; GCN-IR-NEXT: v_addc_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 63, v6
; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[0:1], v4
+; GCN-IR-NEXT: v_mov_b32_e32 v6, 0
; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB1_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, -1, v2
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], v[0:1], v10
-; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v3, vcc
-; GCN-IR-NEXT: v_not_b32_e32 v0, v14
-; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, v0, v15
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT: v_addc_u32_e64 v1, s[4:5], -1, 0, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT: v_lshr_b64 v[0:1], v[0:1], v10
+; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, -1, v2
+; GCN-IR-NEXT: v_addc_u32_e32 v11, vcc, -1, v3, vcc
+; GCN-IR-NEXT: v_not_b32_e32 v6, v8
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, v6, v9
+; GCN-IR-NEXT: v_addc_u32_e64 v13, s[8:9], -1, 0, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
; GCN-IR-NEXT: .LBB1_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v6
+; GCN-IR-NEXT: v_or_b32_e32 v0, v0, v6
; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v12, v8
-; GCN-IR-NEXT: v_subb_u32_e32 v6, vcc, v13, v9, vcc
-; GCN-IR-NEXT: v_or_b32_e32 v4, v10, v4
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v6
-; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 1, v0
-; GCN-IR-NEXT: v_or_b32_e32 v5, v11, v5
-; GCN-IR-NEXT: v_and_b32_e32 v6, 1, v10
-; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v3
-; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v7
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v6
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v10, v0
+; GCN-IR-NEXT: v_subb_u32_e32 v6, vcc, v11, v1, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v4, v8, v4
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v6
+; GCN-IR-NEXT: v_or_b32_e32 v5, v9, v5
+; GCN-IR-NEXT: v_and_b32_e32 v6, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v9, v8, v3
+; GCN-IR-NEXT: v_and_b32_e32 v8, v8, v2
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v9, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v7
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v6
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB1_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB1_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB1_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[4:5], 1
; GCN-IR-NEXT: v_or_b32_e32 v4, v7, v1
; GCN-IR-NEXT: v_or_b32_e32 v5, v6, v0
@@ -923,34 +928,37 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-IR-NEXT: s_mov_b64 s[4:5], 0
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT: s_flbit_i32_b64 s12, s[2:3]
-; GCN-IR-NEXT: s_add_u32 s8, s12, 0xffffffc5
+; GCN-IR-NEXT: s_flbit_i32_b64 s14, s[2:3]
+; GCN-IR-NEXT: s_add_u32 s8, s14, 0xffffffc5
; GCN-IR-NEXT: s_addc_u32 s9, 0, -1
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[2:3], 0
; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[10:11], s[8:9], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[14:15], s[8:9], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[8:9], 63
; GCN-IR-NEXT: s_or_b64 s[10:11], s[6:7], s[10:11]
; GCN-IR-NEXT: s_and_b64 s[6:7], s[10:11], exec
; GCN-IR-NEXT: s_cselect_b32 s6, 0, 24
-; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[14:15]
+; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[12:13]
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11]
; GCN-IR-NEXT: s_mov_b32 s7, 0
; GCN-IR-NEXT: s_cbranch_vccz .LBB8_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: s_add_u32 s10, s8, 1
-; GCN-IR-NEXT: s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
+; GCN-IR-NEXT: s_or_b32 s6, s6, s7
+; GCN-IR-NEXT: s_cmp_lg_u32 s6, 0
+; GCN-IR-NEXT: s_addc_u32 s6, s9, 0
+; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN-IR-NEXT: s_sub_i32 s8, 63, s8
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[6:7]
; GCN-IR-NEXT: s_lshl_b64 s[6:7], 24, s8
; GCN-IR-NEXT: s_cbranch_vccz .LBB8_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
; GCN-IR-NEXT: s_lshr_b64 s[10:11], 24, s10
-; GCN-IR-NEXT: s_add_u32 s14, s2, -1
-; GCN-IR-NEXT: s_addc_u32 s15, s3, -1
-; GCN-IR-NEXT: s_sub_u32 s8, 58, s12
-; GCN-IR-NEXT: s_subb_u32 s9, 0, 0
-; GCN-IR-NEXT: s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT: s_add_u32 s12, s2, -1
+; GCN-IR-NEXT: s_addc_u32 s13, s3, -1
+; GCN-IR-NEXT: s_sub_u32 s14, 58, s14
+; GCN-IR-NEXT: s_subb_u32 s15, 0, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
; GCN-IR-NEXT: s_mov_b32 s5, 0
; GCN-IR-NEXT: .LBB8_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -958,19 +966,22 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_lshr_b32 s4, s7, 31
; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[6:7], s[12:13], s[6:7]
-; GCN-IR-NEXT: s_sub_u32 s4, s14, s10
-; GCN-IR-NEXT: s_subb_u32 s4, s15, s11
-; GCN-IR-NEXT: s_ashr_i32 s12, s4, 31
-; GCN-IR-NEXT: s_mov_b32 s13, s12
-; GCN-IR-NEXT: s_and_b32 s4, s12, 1
-; GCN-IR-NEXT: s_and_b64 s[12:13], s[12:13], s[2:3]
-; GCN-IR-NEXT: s_sub_u32 s10, s10, s12
-; GCN-IR-NEXT: s_subb_u32 s11, s11, s13
-; GCN-IR-NEXT: s_add_u32 s8, s8, 1
-; GCN-IR-NEXT: s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[8:9], 0
-; GCN-IR-NEXT: s_mov_b64 s[12:13], s[4:5]
+; GCN-IR-NEXT: s_or_b64 s[6:7], s[8:9], s[6:7]
+; GCN-IR-NEXT: s_sub_u32 s4, s12, s10
+; GCN-IR-NEXT: s_subb_u32 s4, s13, s11
+; GCN-IR-NEXT: s_ashr_i32 s8, s4, 31
+; GCN-IR-NEXT: s_mov_b32 s9, s8
+; GCN-IR-NEXT: s_and_b32 s4, s8, 1
+; GCN-IR-NEXT: s_and_b64 s[16:17], s[8:9], s[2:3]
+; GCN-IR-NEXT: s_sub_u32 s10, s10, s16
+; GCN-IR-NEXT: s_subb_u32 s11, s11, s17
+; GCN-IR-NEXT: s_add_u32 s14, s14, 1
+; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT: s_or_b32 s16, s16, s17
+; GCN-IR-NEXT: s_cmp_lg_u32 s16, 0
+; GCN-IR-NEXT: s_addc_u32 s15, s15, 0
+; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], s[4:5]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[16:17]
; GCN-IR-NEXT: s_cbranch_vccz .LBB8_3
; GCN-IR-NEXT: .LBB8_4: ; %Flow6
@@ -1094,12 +1105,12 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 0xffffffd0, v10
-; GCN-IR-NEXT: v_addc_u32_e64 v7, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT: v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 0xffffffd0, v8
+; GCN-IR-NEXT: v_addc_u32_e64 v5, s[6:7], 0, -1, vcc
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[6:7]
-; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[6:7], 63, v[6:7]
+; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[6:7], 63, v[4:5]
; GCN-IR-NEXT: v_mov_b32_e32 v3, 0x8000
; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], vcc
; GCN-IR-NEXT: v_cndmask_b32_e64 v3, v3, 0, s[4:5]
@@ -1109,55 +1120,54 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; GCN-IR-NEXT: s_cbranch_execz .LBB9_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v6
-; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v6
-; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v7, vcc
-; GCN-IR-NEXT: s_mov_b64 s[4:5], 0x8000
+; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v4
+; GCN-IR-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v4
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0x8000
+; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[8:9], v2
; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
-; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[4:5], v2
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[10:11]
; GCN-IR-NEXT: s_cbranch_execz .LBB9_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], s[4:5], v8
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 47, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT: v_sub_i32_e32 v12, vcc, 47, v8
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[8:9], v6
+; GCN-IR-NEXT: v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
; GCN-IR-NEXT: .LBB9_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v12, v8
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v13, v9, vcc
-; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v10, v6
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v11, v7, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT: v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB9_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB9_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB9_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v2, v5, v1
; GCN-IR-NEXT: v_or_b32_e32 v3, v4, v0
@@ -1184,13 +1194,13 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e64 v2, s[4:5], 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT: v_sub_i32_e64 v6, s[4:5], 48, v10
-; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, s[4:5]
+; GCN-IR-NEXT: v_min_u32_e32 v6, v2, v3
+; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 48, v6
+; GCN-IR-NEXT: v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[4:5], 63, v[6:7]
+; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
; GCN-IR-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[6:7]
+; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[4:5]
; GCN-IR-NEXT: s_xor_b64 s[6:7], s[4:5], -1
; GCN-IR-NEXT: v_cndmask_b32_e64 v2, v1, 0, s[4:5]
; GCN-IR-NEXT: v_cndmask_b32_e64 v3, v0, 0, s[4:5]
@@ -1198,52 +1208,51 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; GCN-IR-NEXT: s_cbranch_execz .LBB10_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v6
-; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v6
-; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT: v_add_i32_e32 v7, vcc, 1, v4
+; GCN-IR-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB10_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_lshr_b64 v[6:7], v[0:1], v8
-; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 0xffffffcf, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT: v_addc_u32_e64 v1, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 0xffffffcf, v6
+; GCN-IR-NEXT: v_lshr_b64 v[0:1], v[0:1], v7
+; GCN-IR-NEXT: v_addc_u32_e64 v9, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT: s_movk_i32 s10, 0x7fff
; GCN-IR-NEXT: .LBB10_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, s12, v6
+; GCN-IR-NEXT: v_or_b32_e32 v0, v0, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 1, v0
-; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
-; GCN-IR-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
-; GCN-IR-NEXT: v_and_b32_e32 v8, 0x8000, v8
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
-; GCN-IR-NEXT: v_sub_i32_e64 v6, s[4:5], v6, v8
-; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
-; GCN-IR-NEXT: v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, s10, v0
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, 0, v1, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v6, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v6, 31, v4
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v6
+; GCN-IR-NEXT: v_and_b32_e32 v6, 0x8000, v6
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v6
+; GCN-IR-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v8
+; GCN-IR-NEXT: v_or_b32_e32 v3, v7, v3
+; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v7, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v6, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB10_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB10_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB10_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v2, v5, v1
; GCN-IR-NEXT: v_or_b32_e32 v3, v4, v0
@@ -1290,52 +1299,58 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR: ; %bb.0: ; %_udiv-special-cases
; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT: s_flbit_i32_b64 s12, s[2:3]
-; GCN-IR-NEXT: s_sub_u32 s8, 59, s12
+; GCN-IR-NEXT: s_flbit_i32_b64 s10, s[2:3]
+; GCN-IR-NEXT: s_sub_u32 s8, 59, s10
; GCN-IR-NEXT: s_subb_u32 s9, 0, 0
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], s[2:3], 0
; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[6:7], s[8:9], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[8:9], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[8:9], 63
; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
; GCN-IR-NEXT: s_and_b64 s[6:7], s[4:5], exec
; GCN-IR-NEXT: s_cselect_b32 s7, 0, s3
; GCN-IR-NEXT: s_cselect_b32 s6, 0, s2
-; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[10:11]
+; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[12:13]
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; GCN-IR-NEXT: s_mov_b64 s[4:5], 0
; GCN-IR-NEXT: s_cbranch_vccz .LBB11_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: s_add_u32 s10, s8, 1
-; GCN-IR-NEXT: s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT: s_add_u32 s11, s8, 1
+; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
+; GCN-IR-NEXT: s_or_b32 s6, s6, s7
+; GCN-IR-NEXT: s_cmp_lg_u32 s6, 0
+; GCN-IR-NEXT: s_addc_u32 s6, s9, 0
+; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN-IR-NEXT: s_sub_i32 s8, 63, s8
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[6:7]
; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[2:3], s8
; GCN-IR-NEXT: s_cbranch_vccz .LBB11_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: s_lshr_b64 s[8:9], s[2:3], s10
-; GCN-IR-NEXT: s_add_u32 s2, s12, 0xffffffc4
-; GCN-IR-NEXT: s_addc_u32 s3, 0, -1
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT: s_lshr_b64 s[2:3], s[2:3], s11
+; GCN-IR-NEXT: s_add_u32 s10, s10, 0xffffffc4
+; GCN-IR-NEXT: s_addc_u32 s11, 0, -1
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
; GCN-IR-NEXT: s_mov_b32 s5, 0
; GCN-IR-NEXT: .LBB11_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[8:9], 1
+; GCN-IR-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GCN-IR-NEXT: s_lshr_b32 s4, s7, 31
; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
-; GCN-IR-NEXT: s_or_b64 s[8:9], s[8:9], s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[6:7], s[10:11], s[6:7]
-; GCN-IR-NEXT: s_sub_u32 s4, 23, s8
-; GCN-IR-NEXT: s_subb_u32 s4, 0, s9
-; GCN-IR-NEXT: s_ashr_i32 s10, s4, 31
-; GCN-IR-NEXT: s_and_b32 s4, s10, 1
-; GCN-IR-NEXT: s_and_b32 s10, s10, 24
-; GCN-IR-NEXT: s_sub_u32 s8, s8, s10
-; GCN-IR-NEXT: s_subb_u32 s9, s9, 0
-; GCN-IR-NEXT: s_add_u32 s2, s2, 1
-; GCN-IR-NEXT: s_addc_u32 s3, s3, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[2:3], 0
-; GCN-IR-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
+; GCN-IR-NEXT: s_or_b64 s[6:7], s[8:9], s[6:7]
+; GCN-IR-NEXT: s_sub_u32 s4, 23, s2
+; GCN-IR-NEXT: s_subb_u32 s4, 0, s3
+; GCN-IR-NEXT: s_ashr_i32 s8, s4, 31
+; GCN-IR-NEXT: s_and_b32 s4, s8, 1
+; GCN-IR-NEXT: s_and_b32 s8, s8, 24
+; GCN-IR-NEXT: s_sub_u32 s2, s2, s8
+; GCN-IR-NEXT: s_subb_u32 s3, s3, 0
+; GCN-IR-NEXT: s_add_u32 s10, s10, 1
+; GCN-IR-NEXT: s_cselect_b64 s[12:13], -1, 0
+; GCN-IR-NEXT: s_or_b32 s12, s12, s13
+; GCN-IR-NEXT: s_cmp_lg_u32 s12, 0
+; GCN-IR-NEXT: s_addc_u32 s11, s11, 0
+; GCN-IR-NEXT: s_cselect_b64 s[12:13], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], s[4:5]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[12:13]
; GCN-IR-NEXT: s_cbranch_vccz .LBB11_3
; GCN-IR-NEXT: .LBB11_4: ; %Flow6
@@ -1384,13 +1399,13 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e64 v2, s[4:5], 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT: v_sub_i32_e64 v6, s[4:5], 59, v10
-; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, s[4:5]
+; GCN-IR-NEXT: v_min_u32_e32 v6, v2, v3
+; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 59, v6
+; GCN-IR-NEXT: v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[4:5], 63, v[6:7]
+; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
; GCN-IR-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[6:7]
+; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[4:5]
; GCN-IR-NEXT: s_xor_b64 s[6:7], s[4:5], -1
; GCN-IR-NEXT: v_cndmask_b32_e64 v2, v1, 0, s[4:5]
; GCN-IR-NEXT: v_cndmask_b32_e64 v3, v0, 0, s[4:5]
@@ -1398,51 +1413,50 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; GCN-IR-NEXT: s_cbranch_execz .LBB12_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v6
-; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v6
-; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT: v_add_i32_e32 v7, vcc, 1, v4
+; GCN-IR-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB12_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_lshr_b64 v[6:7], v[0:1], v8
-; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 0xffffffc4, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT: v_addc_u32_e64 v1, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 0xffffffc4, v6
+; GCN-IR-NEXT: v_lshr_b64 v[0:1], v[0:1], v7
+; GCN-IR-NEXT: v_addc_u32_e64 v9, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
; GCN-IR-NEXT: .LBB12_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, 23, v6
+; GCN-IR-NEXT: v_or_b32_e32 v0, v0, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 1, v0
-; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
-; GCN-IR-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
-; GCN-IR-NEXT: v_and_b32_e32 v8, 24, v8
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
-; GCN-IR-NEXT: v_sub_i32_e64 v6, s[4:5], v6, v8
-; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
-; GCN-IR-NEXT: v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, 23, v0
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, 0, v1, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v6, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v6, 31, v4
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v6
+; GCN-IR-NEXT: v_and_b32_e32 v6, 24, v6
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v6
+; GCN-IR-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v8
+; GCN-IR-NEXT: v_or_b32_e32 v3, v7, v3
+; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v7, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v6, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB12_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB12_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB12_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v2, v5, v1
; GCN-IR-NEXT: v_or_b32_e32 v3, v4, v0
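
Across udiv64.ll the churn concentrates in the udiv-do-while loop: the trip counter's increment now keeps its carry in vcc via v_addc, so the separate v_cmp_eq_u64 against zero disappears. The loop itself is a restoring shift-subtract divider. A hedged IR sketch of the underlying algorithm (assuming a nonzero divisor and ignoring the special-case pre-checks the real expansion performs) is:

define i64 @udiv_sketch(i64 %num, i64 %den) {
entry:
  br label %loop

loop:                                         ; one iteration per quotient bit
  %i = phi i64 [ 63, %entry ], [ %i.next, %loop ]
  %rem = phi i64 [ 0, %entry ], [ %rem.next, %loop ]
  %quo = phi i64 [ 0, %entry ], [ %quo.next, %loop ]
  %num.sh = lshr i64 %num, %i
  %num.bit = and i64 %num.sh, 1
  %rem.shl = shl i64 %rem, 1
  %rem.in = or i64 %rem.shl, %num.bit         ; bring down the next dividend bit
  %ge = icmp uge i64 %rem.in, %den            ; does the divisor fit?
  %rem.sub = sub i64 %rem.in, %den
  %rem.next = select i1 %ge, i64 %rem.sub, i64 %rem.in
  %q.bit = zext i1 %ge to i64
  %quo.shl = shl i64 %quo, 1
  %quo.next = or i64 %quo.shl, %q.bit         ; shift in the new quotient bit
  %i.next = add i64 %i, -1
  %done = icmp slt i64 %i.next, 0
  br i1 %done, label %exit, label %loop

exit:
  ret i64 %quo.next
}

The v_ashrrev_i32 31 / v_and pair in the checked loops is the branchless form of the %ge select above: the sign bit of (den - 1 - rem) is broadcast into a mask that gates both the quotient bit and the conditional subtract.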
diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index b846ce7..cdcc914 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -170,35 +170,38 @@ define amdgpu_kernel void @s_test_urem_i64(ptr addrspace(1) %out, i64 %x, i64 %y
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[2:3], 0
; GCN-IR-NEXT: s_flbit_i32_b64 s10, s[6:7]
-; GCN-IR-NEXT: s_flbit_i32_b64 s18, s[2:3]
+; GCN-IR-NEXT: s_flbit_i32_b64 s16, s[2:3]
; GCN-IR-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
-; GCN-IR-NEXT: s_sub_u32 s12, s10, s18
+; GCN-IR-NEXT: s_sub_u32 s12, s10, s16
; GCN-IR-NEXT: s_subb_u32 s13, 0, 0
; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[14:15], s[12:13], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[12:13], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[12:13], 63
; GCN-IR-NEXT: s_or_b64 s[14:15], s[8:9], s[14:15]
; GCN-IR-NEXT: s_and_b64 s[8:9], s[14:15], exec
; GCN-IR-NEXT: s_cselect_b32 s9, 0, s3
; GCN-IR-NEXT: s_cselect_b32 s8, 0, s2
-; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[16:17]
+; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[18:19]
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[14:15]
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: s_add_u32 s14, s12, 1
-; GCN-IR-NEXT: s_addc_u32 s15, s13, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
+; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
+; GCN-IR-NEXT: s_or_b32 s8, s8, s9
+; GCN-IR-NEXT: s_cmp_lg_u32 s8, 0
+; GCN-IR-NEXT: s_addc_u32 s8, s13, 0
+; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-IR-NEXT: s_sub_i32 s12, 63, s12
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[8:9]
; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[2:3], s12
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
; GCN-IR-NEXT: s_lshr_b64 s[12:13], s[2:3], s14
-; GCN-IR-NEXT: s_add_u32 s16, s6, -1
-; GCN-IR-NEXT: s_addc_u32 s17, s7, -1
+; GCN-IR-NEXT: s_add_u32 s14, s6, -1
+; GCN-IR-NEXT: s_addc_u32 s15, s7, -1
; GCN-IR-NEXT: s_not_b64 s[4:5], s[10:11]
-; GCN-IR-NEXT: s_add_u32 s10, s4, s18
-; GCN-IR-NEXT: s_addc_u32 s11, s5, 0
-; GCN-IR-NEXT: s_mov_b64 s[14:15], 0
+; GCN-IR-NEXT: s_add_u32 s16, s4, s16
+; GCN-IR-NEXT: s_addc_u32 s17, s5, 0
+; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
; GCN-IR-NEXT: s_mov_b32 s5, 0
; GCN-IR-NEXT: .LBB0_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -206,19 +209,22 @@ define amdgpu_kernel void @s_test_urem_i64(ptr addrspace(1) %out, i64 %x, i64 %y
; GCN-IR-NEXT: s_lshr_b32 s4, s9, 31
; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[8:9], 1
; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[8:9], s[14:15], s[8:9]
-; GCN-IR-NEXT: s_sub_u32 s4, s16, s12
-; GCN-IR-NEXT: s_subb_u32 s4, s17, s13
-; GCN-IR-NEXT: s_ashr_i32 s14, s4, 31
-; GCN-IR-NEXT: s_mov_b32 s15, s14
-; GCN-IR-NEXT: s_and_b32 s4, s14, 1
-; GCN-IR-NEXT: s_and_b64 s[14:15], s[14:15], s[6:7]
-; GCN-IR-NEXT: s_sub_u32 s12, s12, s14
-; GCN-IR-NEXT: s_subb_u32 s13, s13, s15
-; GCN-IR-NEXT: s_add_u32 s10, s10, 1
-; GCN-IR-NEXT: s_addc_u32 s11, s11, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[10:11], 0
-; GCN-IR-NEXT: s_mov_b64 s[14:15], s[4:5]
+; GCN-IR-NEXT: s_or_b64 s[8:9], s[10:11], s[8:9]
+; GCN-IR-NEXT: s_sub_u32 s4, s14, s12
+; GCN-IR-NEXT: s_subb_u32 s4, s15, s13
+; GCN-IR-NEXT: s_ashr_i32 s10, s4, 31
+; GCN-IR-NEXT: s_mov_b32 s11, s10
+; GCN-IR-NEXT: s_and_b32 s4, s10, 1
+; GCN-IR-NEXT: s_and_b64 s[18:19], s[10:11], s[6:7]
+; GCN-IR-NEXT: s_sub_u32 s12, s12, s18
+; GCN-IR-NEXT: s_subb_u32 s13, s13, s19
+; GCN-IR-NEXT: s_add_u32 s16, s16, 1
+; GCN-IR-NEXT: s_cselect_b64 s[18:19], -1, 0
+; GCN-IR-NEXT: s_or_b32 s18, s18, s19
+; GCN-IR-NEXT: s_cmp_lg_u32 s18, 0
+; GCN-IR-NEXT: s_addc_u32 s17, s17, 0
+; GCN-IR-NEXT: s_cselect_b64 s[18:19], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[10:11], s[4:5]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[18:19]
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_3
; GCN-IR-NEXT: .LBB0_4: ; %Flow7
@@ -362,12 +368,12 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v2
; GCN-IR-NEXT: v_add_i32_e64 v4, s[6:7], 32, v4
; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT: v_min_u32_e32 v12, v4, v5
+; GCN-IR-NEXT: v_min_u32_e32 v10, v4, v5
; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v0
; GCN-IR-NEXT: v_add_i32_e64 v4, s[6:7], 32, v4
; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT: v_min_u32_e32 v13, v4, v5
-; GCN-IR-NEXT: v_sub_i32_e64 v4, s[6:7], v12, v13
+; GCN-IR-NEXT: v_min_u32_e32 v11, v4, v5
+; GCN-IR-NEXT: v_sub_i32_e64 v4, s[6:7], v10, v11
; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
; GCN-IR-NEXT: v_subb_u32_e64 v5, s[6:7], 0, 0, s[6:7]
@@ -383,54 +389,53 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
; GCN-IR-NEXT: s_cbranch_execz .LBB1_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 63, v4
-; GCN-IR-NEXT: v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[0:1], v4
+; GCN-IR-NEXT: v_mov_b32_e32 v6, 0
; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB1_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, -1, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, -1, v3, vcc
-; GCN-IR-NEXT: v_not_b32_e32 v6, v12
-; GCN-IR-NEXT: v_lshr_b64 v[10:11], v[0:1], v8
-; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, v6, v13
-; GCN-IR-NEXT: v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT: v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v13, 0
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, -1, v2
+; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v3, vcc
+; GCN-IR-NEXT: v_not_b32_e32 v6, v10
+; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, v6, v11
+; GCN-IR-NEXT: v_lshr_b64 v[8:9], v[0:1], v8
+; GCN-IR-NEXT: v_addc_u32_e64 v15, s[8:9], -1, 0, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
; GCN-IR-NEXT: .LBB1_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[10:11], v[10:11], 1
+; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT: v_or_b32_e32 v10, v10, v6
+; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v6
; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v14, v10
-; GCN-IR-NEXT: v_subb_u32_e32 v6, vcc, v15, v11, vcc
-; GCN-IR-NEXT: v_or_b32_e32 v4, v12, v4
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v6
-; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v8
-; GCN-IR-NEXT: v_or_b32_e32 v5, v13, v5
-; GCN-IR-NEXT: v_and_b32_e32 v6, 1, v12
-; GCN-IR-NEXT: v_and_b32_e32 v13, v12, v3
-; GCN-IR-NEXT: v_and_b32_e32 v12, v12, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GCN-IR-NEXT: v_sub_i32_e64 v10, s[4:5], v10, v12
-; GCN-IR-NEXT: v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5]
-; GCN-IR-NEXT: v_mov_b32_e32 v13, v7
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v12, v6
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v12, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v6, vcc, v13, v9, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v4, v10, v4
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v6
+; GCN-IR-NEXT: v_or_b32_e32 v5, v11, v5
+; GCN-IR-NEXT: v_and_b32_e32 v6, 1, v10
+; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v3
+; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v2
+; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, v8, v10
+; GCN-IR-NEXT: v_subb_u32_e32 v9, vcc, v9, v11, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, 1, v14
+; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v11, v7
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v10, v6
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB1_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB1_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB1_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[4:5], 1
; GCN-IR-NEXT: v_or_b32_e32 v7, v7, v5
; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
@@ -948,34 +953,37 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-IR-NEXT: s_mov_b64 s[4:5], 0
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT: s_flbit_i32_b64 s12, s[2:3]
-; GCN-IR-NEXT: s_add_u32 s8, s12, 0xffffffc5
+; GCN-IR-NEXT: s_flbit_i32_b64 s14, s[2:3]
+; GCN-IR-NEXT: s_add_u32 s8, s14, 0xffffffc5
; GCN-IR-NEXT: s_addc_u32 s9, 0, -1
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[2:3], 0
; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[10:11], s[8:9], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[14:15], s[8:9], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[8:9], 63
; GCN-IR-NEXT: s_or_b64 s[10:11], s[6:7], s[10:11]
; GCN-IR-NEXT: s_and_b64 s[6:7], s[10:11], exec
; GCN-IR-NEXT: s_cselect_b32 s6, 0, 24
-; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[14:15]
+; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[12:13]
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11]
; GCN-IR-NEXT: s_mov_b32 s7, 0
; GCN-IR-NEXT: s_cbranch_vccz .LBB6_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: s_add_u32 s10, s8, 1
-; GCN-IR-NEXT: s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
+; GCN-IR-NEXT: s_or_b32 s6, s6, s7
+; GCN-IR-NEXT: s_cmp_lg_u32 s6, 0
+; GCN-IR-NEXT: s_addc_u32 s6, s9, 0
+; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN-IR-NEXT: s_sub_i32 s8, 63, s8
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[6:7]
; GCN-IR-NEXT: s_lshl_b64 s[6:7], 24, s8
; GCN-IR-NEXT: s_cbranch_vccz .LBB6_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
; GCN-IR-NEXT: s_lshr_b64 s[10:11], 24, s10
-; GCN-IR-NEXT: s_add_u32 s14, s2, -1
-; GCN-IR-NEXT: s_addc_u32 s15, s3, -1
-; GCN-IR-NEXT: s_sub_u32 s8, 58, s12
-; GCN-IR-NEXT: s_subb_u32 s9, 0, 0
-; GCN-IR-NEXT: s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT: s_add_u32 s12, s2, -1
+; GCN-IR-NEXT: s_addc_u32 s13, s3, -1
+; GCN-IR-NEXT: s_sub_u32 s14, 58, s14
+; GCN-IR-NEXT: s_subb_u32 s15, 0, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
; GCN-IR-NEXT: s_mov_b32 s5, 0
; GCN-IR-NEXT: .LBB6_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -983,19 +991,22 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_lshr_b32 s4, s7, 31
; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[6:7], s[12:13], s[6:7]
-; GCN-IR-NEXT: s_sub_u32 s4, s14, s10
-; GCN-IR-NEXT: s_subb_u32 s4, s15, s11
-; GCN-IR-NEXT: s_ashr_i32 s12, s4, 31
-; GCN-IR-NEXT: s_mov_b32 s13, s12
-; GCN-IR-NEXT: s_and_b32 s4, s12, 1
-; GCN-IR-NEXT: s_and_b64 s[12:13], s[12:13], s[2:3]
-; GCN-IR-NEXT: s_sub_u32 s10, s10, s12
-; GCN-IR-NEXT: s_subb_u32 s11, s11, s13
-; GCN-IR-NEXT: s_add_u32 s8, s8, 1
-; GCN-IR-NEXT: s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[8:9], 0
-; GCN-IR-NEXT: s_mov_b64 s[12:13], s[4:5]
+; GCN-IR-NEXT: s_or_b64 s[6:7], s[8:9], s[6:7]
+; GCN-IR-NEXT: s_sub_u32 s4, s12, s10
+; GCN-IR-NEXT: s_subb_u32 s4, s13, s11
+; GCN-IR-NEXT: s_ashr_i32 s8, s4, 31
+; GCN-IR-NEXT: s_mov_b32 s9, s8
+; GCN-IR-NEXT: s_and_b32 s4, s8, 1
+; GCN-IR-NEXT: s_and_b64 s[16:17], s[8:9], s[2:3]
+; GCN-IR-NEXT: s_sub_u32 s10, s10, s16
+; GCN-IR-NEXT: s_subb_u32 s11, s11, s17
+; GCN-IR-NEXT: s_add_u32 s14, s14, 1
+; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT: s_or_b32 s16, s16, s17
+; GCN-IR-NEXT: s_cmp_lg_u32 s16, 0
+; GCN-IR-NEXT: s_addc_u32 s15, s15, 0
+; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], s[4:5]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[16:17]
; GCN-IR-NEXT: s_cbranch_vccz .LBB6_3
; GCN-IR-NEXT: .LBB6_4: ; %Flow6
@@ -1064,52 +1075,58 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR: ; %bb.0: ; %_udiv-special-cases
; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT: s_flbit_i32_b64 s12, s[2:3]
-; GCN-IR-NEXT: s_sub_u32 s8, 59, s12
+; GCN-IR-NEXT: s_flbit_i32_b64 s10, s[2:3]
+; GCN-IR-NEXT: s_sub_u32 s8, 59, s10
; GCN-IR-NEXT: s_subb_u32 s9, 0, 0
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], s[2:3], 0
; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[6:7], s[8:9], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[8:9], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[8:9], 63
; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
; GCN-IR-NEXT: s_and_b64 s[6:7], s[4:5], exec
; GCN-IR-NEXT: s_cselect_b32 s7, 0, s3
; GCN-IR-NEXT: s_cselect_b32 s6, 0, s2
-; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[10:11]
+; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[12:13]
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; GCN-IR-NEXT: s_mov_b64 s[4:5], 0
; GCN-IR-NEXT: s_cbranch_vccz .LBB7_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: s_add_u32 s10, s8, 1
-; GCN-IR-NEXT: s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT: s_add_u32 s11, s8, 1
+; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
+; GCN-IR-NEXT: s_or_b32 s6, s6, s7
+; GCN-IR-NEXT: s_cmp_lg_u32 s6, 0
+; GCN-IR-NEXT: s_addc_u32 s6, s9, 0
+; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN-IR-NEXT: s_sub_i32 s8, 63, s8
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[6:7]
; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[2:3], s8
; GCN-IR-NEXT: s_cbranch_vccz .LBB7_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: s_lshr_b64 s[10:11], s[2:3], s10
-; GCN-IR-NEXT: s_add_u32 s8, s12, 0xffffffc4
-; GCN-IR-NEXT: s_addc_u32 s9, 0, -1
-; GCN-IR-NEXT: s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT: s_lshr_b64 s[8:9], s[2:3], s11
+; GCN-IR-NEXT: s_add_u32 s12, s10, 0xffffffc4
+; GCN-IR-NEXT: s_addc_u32 s13, 0, -1
+; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
; GCN-IR-NEXT: s_mov_b32 s5, 0
; GCN-IR-NEXT: .LBB7_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[10:11], 1
+; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[8:9], 1
; GCN-IR-NEXT: s_lshr_b32 s4, s7, 31
; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
-; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[6:7], s[12:13], s[6:7]
-; GCN-IR-NEXT: s_sub_u32 s4, 23, s10
-; GCN-IR-NEXT: s_subb_u32 s4, 0, s11
-; GCN-IR-NEXT: s_ashr_i32 s12, s4, 31
-; GCN-IR-NEXT: s_and_b32 s4, s12, 1
-; GCN-IR-NEXT: s_and_b32 s12, s12, 24
-; GCN-IR-NEXT: s_sub_u32 s10, s10, s12
-; GCN-IR-NEXT: s_subb_u32 s11, s11, 0
-; GCN-IR-NEXT: s_add_u32 s8, s8, 1
-; GCN-IR-NEXT: s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[14:15], s[8:9], 0
-; GCN-IR-NEXT: s_mov_b64 s[12:13], s[4:5]
+; GCN-IR-NEXT: s_or_b64 s[8:9], s[8:9], s[4:5]
+; GCN-IR-NEXT: s_or_b64 s[6:7], s[10:11], s[6:7]
+; GCN-IR-NEXT: s_sub_u32 s4, 23, s8
+; GCN-IR-NEXT: s_subb_u32 s4, 0, s9
+; GCN-IR-NEXT: s_ashr_i32 s10, s4, 31
+; GCN-IR-NEXT: s_and_b32 s4, s10, 1
+; GCN-IR-NEXT: s_and_b32 s10, s10, 24
+; GCN-IR-NEXT: s_sub_u32 s8, s8, s10
+; GCN-IR-NEXT: s_subb_u32 s9, s9, 0
+; GCN-IR-NEXT: s_add_u32 s12, s12, 1
+; GCN-IR-NEXT: s_cselect_b64 s[14:15], -1, 0
+; GCN-IR-NEXT: s_or_b32 s14, s14, s15
+; GCN-IR-NEXT: s_cmp_lg_u32 s14, 0
+; GCN-IR-NEXT: s_addc_u32 s13, s13, 0
+; GCN-IR-NEXT: s_cselect_b64 s[14:15], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[10:11], s[4:5]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[14:15]
; GCN-IR-NEXT: s_cbranch_vccz .LBB7_3
; GCN-IR-NEXT: .LBB7_4: ; %Flow6
@@ -1241,8 +1258,8 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 0xffffffd0, v10
+; GCN-IR-NEXT: v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 0xffffffd0, v8
; GCN-IR-NEXT: v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1257,54 +1274,53 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
; GCN-IR-NEXT: s_cbranch_execz .LBB8_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-IR-NEXT: s_mov_b64 s[4:5], 0x8000
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0x8000
+; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[8:9], v2
; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[4:5], v2
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[10:11]
; GCN-IR-NEXT: s_cbranch_execz .LBB8_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], s[4:5], v6
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 47, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT: v_sub_i32_e32 v12, vcc, 47, v8
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[8:9], v6
+; GCN-IR-NEXT: v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
; GCN-IR-NEXT: .LBB8_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v12, v8
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v13, v9, vcc
-; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v10, v6
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v11, v7, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT: v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB8_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB8_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB8_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v3
; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v2
@@ -1337,8 +1353,8 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e64 v2, s[4:5], 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 48, v10
+; GCN-IR-NEXT: v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 48, v8
; GCN-IR-NEXT: v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[4:5], 63, v[2:3]
@@ -1352,51 +1368,50 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
; GCN-IR-NEXT: s_cbranch_execz .LBB9_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB9_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], v[0:1], v6
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 0xffffffcf, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT: v_addc_u32_e64 v7, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, 0xffffffcf, v8
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], v[0:1], v6
+; GCN-IR-NEXT: v_addc_u32_e64 v11, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT: s_movk_i32 s10, 0x7fff
; GCN-IR-NEXT: .LBB9_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, s12, v8
+; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, 0, v9, vcc
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT: v_and_b32_e32 v10, 0x8000, v10
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT: v_subbrev_u32_e64 v9, s[4:5], 0, v9, s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, s10, v6
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v8, 0x8000, v8
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT: v_subbrev_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, 1, v10
+; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT: v_addc_u32_e32 v11, vcc, 0, v11, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB9_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB9_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB9_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v3
; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v2
diff --git a/llvm/test/CodeGen/AMDGPU/usubo.ll b/llvm/test/CodeGen/AMDGPU/usubo.ll
index 0289dab..d67a7b1 100644
--- a/llvm/test/CodeGen/AMDGPU/usubo.ll
+++ b/llvm/test/CodeGen/AMDGPU/usubo.ll
@@ -14,15 +14,16 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_sub_u32 s0, s2, s8
-; SI-NEXT: v_mov_b32_e32 v0, s2
+; SI-NEXT: s_sub_u32 s2, s2, s8
; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_subb_u32 s1, s3, s9
+; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; SI-NEXT: s_or_b32 s0, s0, s1
+; SI-NEXT: s_cmp_lg_u32 s0, 0
+; SI-NEXT: s_subb_u32 s3, s3, s9
+; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; SI-NEXT: v_mov_b32_e32 v1, s3
-; SI-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
-; SI-NEXT: v_mov_b32_e32 v1, s1
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_endpgm
@@ -33,15 +34,15 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: s_sub_u32 s0, s2, s4
-; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: s_sub_u32 s2, s2, s4
; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT: s_cmp_lg_u64 s[0:1], 0
+; VI-NEXT: s_subb_u32 s3, s3, s5
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
; VI-NEXT: v_mov_b32_e32 v3, s3
-; VI-NEXT: s_subb_u32 s1, s3, s5
-; VI-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[2:3]
-; VI-NEXT: v_mov_b32_e32 v3, s1
-; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT: v_add_u32_e32 v2, vcc, s2, v2
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
@@ -52,14 +53,14 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v0, s2
-; GFX9-NEXT: s_sub_u32 s4, s2, s6
-; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: s_subb_u32 s5, s3, s7
-; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, s[4:5], v[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v1, s5
-; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s4, v0
+; GFX9-NEXT: s_sub_u32 s6, s2, s6
+; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0
+; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0
+; GFX9-NEXT: s_subb_u32 s4, s3, s7
+; GFX9-NEXT: s_cselect_b64 s[2:3], -1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s6, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX9-NEXT: s_endpgm
@@ -71,12 +72,14 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; GFX10-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: s_sub_u32 s4, s2, s6
-; GFX10-NEXT: s_subb_u32 s5, s3, s7
-; GFX10-NEXT: v_cmp_gt_u64_e64 s2, s[4:5], s[2:3]
-; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX10-NEXT: v_add_co_u32 v0, s2, s4, v0
-; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s2, s5, 0, s2
+; GFX10-NEXT: s_sub_u32 s2, s2, s6
+; GFX10-NEXT: s_cselect_b32 s4, -1, 0
+; GFX10-NEXT: s_cmp_lg_u32 s4, 0
+; GFX10-NEXT: s_subb_u32 s3, s3, s7
+; GFX10-NEXT: s_cselect_b32 s4, -1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s4
+; GFX10-NEXT: v_add_co_u32 v0, s2, s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s2, s3, 0, s2
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -87,14 +90,16 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; GFX11-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-NEXT: v_mov_b32_e32 v2, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_sub_u32 s4, s2, s4
-; GFX11-NEXT: s_subb_u32 s5, s3, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cmp_gt_u64_e64 s2, s[4:5], s[2:3]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
+; GFX11-NEXT: s_sub_u32 s2, s2, s4
+; GFX11-NEXT: s_cselect_b32 s4, -1, 0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: s_cmp_lg_u32 s4, 0
+; GFX11-NEXT: s_subb_u32 s3, s3, s5
+; GFX11-NEXT: s_cselect_b32 s4, -1, 0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v0, s2, s4, v0
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s5, 0, s2
+; GFX11-NEXT: v_add_co_u32 v0, s2, s2, v0
+; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s3, 0, s2
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_endpgm
%usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) #0
@@ -435,21 +440,23 @@ define amdgpu_kernel void @s_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_sub_u32 s6, s4, s6
-; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: s_subb_u32 s7, s5, s7
-; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: v_cmp_gt_u64_e32 vcc, s[6:7], v[0:1]
-; SI-NEXT: v_mov_b32_e32 v2, s6
+; SI-NEXT: s_sub_u32 s4, s4, s6
+; SI-NEXT: s_cselect_b64 s[12:13], -1, 0
+; SI-NEXT: s_or_b32 s6, s12, s13
+; SI-NEXT: s_cmp_lg_u32 s6, 0
+; SI-NEXT: s_subb_u32 s5, s5, s7
; SI-NEXT: s_mov_b32 s8, s0
; SI-NEXT: s_mov_b32 s9, s1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_mov_b32_e32 v1, s5
+; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
; SI-NEXT: s_mov_b32 s0, s2
; SI-NEXT: s_mov_b32 s1, s3
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
-; SI-NEXT: v_mov_b32_e32 v3, s7
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-NEXT: buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; SI-NEXT: buffer_store_byte v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
@@ -457,37 +464,37 @@ define amdgpu_kernel void @s_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: s_sub_u32 s2, s4, s6
; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: s_sub_u32 s0, s4, s6
-; VI-NEXT: v_mov_b32_e32 v4, s4
; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: s_subb_u32 s1, s5, s7
-; VI-NEXT: v_mov_b32_e32 v5, s5
-; VI-NEXT: v_mov_b32_e32 v7, s1
-; VI-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[4:5]
-; VI-NEXT: v_mov_b32_e32 v6, s0
-; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT: s_cmp_lg_u64 s[0:1], 0
+; VI-NEXT: s_subb_u32 s0, s5, s7
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_mov_b32_e32 v5, s0
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
; VI-NEXT: v_mov_b32_e32 v3, s3
-; VI-NEXT: flat_store_dwordx2 v[0:1], v[6:7]
-; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; VI-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
+; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; VI-NEXT: flat_store_byte v[2:3], v0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: s_usubo_i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_sub_u32 s0, s12, s14
-; GFX9-NEXT: v_mov_b32_e32 v0, s12
-; GFX9-NEXT: v_mov_b32_e32 v1, s13
-; GFX9-NEXT: s_subb_u32 s1, s13, s15
-; GFX9-NEXT: v_mov_b32_e32 v3, s1
-; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v2, s0
-; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT: global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX9-NEXT: global_store_byte v4, v0, s[10:11]
+; GFX9-NEXT: s_sub_u32 s2, s12, s14
+; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX9-NEXT: s_subb_u32 s0, s13, s15
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[0:1]
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX9-NEXT: global_store_byte v2, v3, s[10:11]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: s_usubo_i64:
@@ -496,10 +503,12 @@ define amdgpu_kernel void @s_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_sub_u32 s0, s12, s14
-; GFX10-NEXT: s_subb_u32 s1, s13, s15
+; GFX10-NEXT: s_cselect_b32 s1, -1, 0
; GFX10-NEXT: v_mov_b32_e32 v0, s0
+; GFX10-NEXT: s_cmp_lg_u32 s1, 0
+; GFX10-NEXT: s_subb_u32 s1, s13, s15
+; GFX10-NEXT: s_cselect_b32 s0, -1, 0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
-; GFX10-NEXT: v_cmp_gt_u64_e64 s0, s[0:1], s[12:13]
; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
; GFX10-NEXT: global_store_byte v2, v3, s[10:11]
@@ -509,12 +518,13 @@ define amdgpu_kernel void @s_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_sub_u32 s6, s4, s6
-; GFX11-NEXT: s_subb_u32 s7, s5, s7
-; GFX11-NEXT: v_mov_b32_e32 v0, s6
-; GFX11-NEXT: v_cmp_gt_u64_e64 s4, s[6:7], s[4:5]
-; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: s_sub_u32 s4, s4, s6
+; GFX11-NEXT: s_cselect_b32 s6, -1, 0
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
+; GFX11-NEXT: s_cmp_lg_u32 s6, 0
+; GFX11-NEXT: s_subb_u32 s5, s5, s7
+; GFX11-NEXT: s_cselect_b32 s4, -1, 0
+; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -550,10 +560,10 @@ define amdgpu_kernel void @v_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_sub_i32_e32 v2, vcc, v0, v2
-; SI-NEXT: v_subb_u32_e32 v3, vcc, v1, v3, vcc
-; SI-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; SI-NEXT: buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; SI-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
+; SI-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
@@ -573,10 +583,9 @@ define amdgpu_kernel void @v_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; VI-NEXT: v_mov_b32_e32 v6, s2
; VI-NEXT: v_mov_b32_e32 v7, s3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_sub_u32_e32 v2, vcc, v0, v2
-; VI-NEXT: v_subb_u32_e32 v3, vcc, v1, v3, vcc
-; VI-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; VI-NEXT: flat_store_dwordx2 v[4:5], v[2:3]
+; VI-NEXT: v_sub_u32_e32 v0, vcc, v0, v2
+; VI-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; VI-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; VI-NEXT: flat_store_byte v[6:7], v0
; VI-NEXT: s_endpgm
@@ -589,10 +598,9 @@ define amdgpu_kernel void @v_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX9-NEXT: global_load_dwordx2 v[0:1], v4, s[12:13]
; GFX9-NEXT: global_load_dwordx2 v[2:3], v4, s[14:15]
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, v0, v2
-; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX9-NEXT: global_store_dwordx2 v4, v[2:3], s[8:9]
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: global_store_dwordx2 v4, v[0:1], s[8:9]
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; GFX9-NEXT: global_store_byte v4, v0, s[10:11]
; GFX9-NEXT: s_endpgm
@@ -606,12 +614,11 @@ define amdgpu_kernel void @v_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX10-NEXT: global_load_dwordx2 v[0:1], v4, s[12:13]
; GFX10-NEXT: global_load_dwordx2 v[2:3], v4, s[14:15]
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_sub_co_u32 v2, vcc_lo, v0, v2
-; GFX10-NEXT: v_sub_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
-; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX10-NEXT: global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX10-NEXT: global_store_byte v4, v0, s[10:11]
+; GFX10-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX10-NEXT: global_store_dwordx2 v4, v[0:1], s[8:9]
+; GFX10-NEXT: global_store_byte v4, v2, s[10:11]
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: v_usubo_i64:
@@ -623,14 +630,12 @@ define amdgpu_kernel void @v_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX11-NEXT: global_load_b64 v[0:1], v4, s[4:5]
; GFX11-NEXT: global_load_b64 v[2:3], v4, s[6:7]
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_sub_co_u32 v2, vcc_lo, v0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
-; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: global_store_b64 v4, v[2:3], s[0:1]
-; GFX11-NEXT: global_store_b8 v4, v0, s[2:3]
+; GFX11-NEXT: global_store_b64 v4, v[0:1], s[0:1]
+; GFX11-NEXT: global_store_b8 v4, v2, s[2:3]
; GFX11-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
diff --git a/llvm/test/CodeGen/AMDGPU/usubsat.ll b/llvm/test/CodeGen/AMDGPU/usubsat.ll
index 90491a0..3ddb2f0 100644
--- a/llvm/test/CodeGen/AMDGPU/usubsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/usubsat.ll
@@ -730,52 +730,38 @@ define i64 @v_usubsat_i64(i64 %lhs, i64 %rhs) {
; GFX6-LABEL: v_usubsat_i64:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v0, v2
-; GFX6-NEXT: v_subb_u32_e32 v3, vcc, v1, v3, vcc
-; GFX6-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX6-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
-; GFX6-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_usubsat_i64:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_sub_u32_e32 v2, vcc, v0, v2
-; GFX8-NEXT: v_subb_u32_e32 v3, vcc, v1, v3, vcc
-; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX8-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_usubsat_i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, v0, v2
-; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX10-LABEL: v_usubsat_i64:
-; GFX10: ; %bb.0:
-; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_sub_co_u32 v2, vcc_lo, v0, v2
-; GFX10-NEXT: v_sub_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
-; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo
-; GFX10-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-LABEL: v_usubsat_i64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_sub_co_u32 v2, vcc_lo, v0, v2
-; GFX11-NEXT: v_sub_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
-; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX10PLUS-LABEL: v_usubsat_i64:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX10PLUS-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10PLUS-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc_lo
+; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc_lo
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
%result = call i64 @llvm.usub.sat.i64(i64 %lhs, i64 %rhs)
ret i64 %result
}