Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/addsub64_carry.ll')
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/addsub64_carry.ll | 192 |
1 file changed, 90 insertions, 102 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
index d326966..b72eba8 100644
--- a/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
+++ b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
@@ -17,12 +17,9 @@ define %struct.uint96 @v_add64_32(i64 %val64A, i64 %val64B, i32 %val32) {
 ; CHECK-LABEL: v_add64_32:
 ; CHECK: ; %bb.0:
 ; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_add_co_u32_e32 v5, vcc, v0, v2
-; CHECK-NEXT: v_addc_co_u32_e32 v6, vcc, v1, v3, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, v[5:6], v[0:1]
-; CHECK-NEXT: v_mov_b32_e32 v0, v5
+; CHECK-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; CHECK-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
 ; CHECK-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v4, vcc
-; CHECK-NEXT: v_mov_b32_e32 v1, v6
 ; CHECK-NEXT: s_setpc_b64 s[30:31]
   %sum64 = add i64 %val64A, %val64B
   %obit = icmp ult i64 %sum64, %val64A
@@ -38,16 +35,14 @@ define <2 x i64> @v_uadd_v2i64(<2 x i64> %val0, <2 x i64> %val1, ptr %ptrval) {
 ; CHECK: ; %bb.0:
 ; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, v2, v6
+; CHECK-NEXT: v_add_co_u32_e64 v4, s[4:5], v0, v4
 ; CHECK-NEXT: v_addc_co_u32_e32 v7, vcc, v3, v7, vcc
-; CHECK-NEXT: v_add_co_u32_e32 v4, vcc, v0, v4
-; CHECK-NEXT: v_addc_co_u32_e32 v5, vcc, v1, v5, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[0:1]
-; CHECK-NEXT: flat_store_dwordx4 v[8:9], v[4:7]
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
-; CHECK-NEXT: v_mov_b32_e32 v1, v0
+; CHECK-NEXT: v_addc_co_u32_e64 v5, s[4:5], v1, v5, s[4:5]
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
 ; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; CHECK-NEXT: v_mov_b32_e32 v1, v0
 ; CHECK-NEXT: v_mov_b32_e32 v3, v2
+; CHECK-NEXT: flat_store_dwordx4 v[8:9], v[4:7]
 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
 ; CHECK-NEXT: s_setpc_b64 s[30:31]
   %pair = call {<2 x i64>, <2 x i1>} @llvm.uadd.with.overflow.v2i64(<2 x i64> %val0, <2 x i64> %val1)
@@ -63,16 +58,14 @@ define <2 x i64> @v_usub_v2i64(<2 x i64> %val0, <2 x i64> %val1, ptr %ptrval) {
 ; CHECK: ; %bb.0:
 ; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CHECK-NEXT: v_sub_co_u32_e32 v6, vcc, v2, v6
+; CHECK-NEXT: v_sub_co_u32_e64 v4, s[4:5], v0, v4
 ; CHECK-NEXT: v_subb_co_u32_e32 v7, vcc, v3, v7, vcc
-; CHECK-NEXT: v_sub_co_u32_e32 v4, vcc, v0, v4
-; CHECK-NEXT: v_subb_co_u32_e32 v5, vcc, v1, v5, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, v[4:5], v[0:1]
-; CHECK-NEXT: flat_store_dwordx4 v[8:9], v[4:7]
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
-; CHECK-NEXT: v_mov_b32_e32 v1, v0
+; CHECK-NEXT: v_subb_co_u32_e64 v5, s[4:5], v1, v5, s[4:5]
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
 ; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; CHECK-NEXT: v_mov_b32_e32 v1, v0
 ; CHECK-NEXT: v_mov_b32_e32 v3, v2
+; CHECK-NEXT: flat_store_dwordx4 v[8:9], v[4:7]
 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
 ; CHECK-NEXT: s_setpc_b64 s[30:31]
   %pair = call {<2 x i64>, <2 x i1>} @llvm.usub.with.overflow.v2i64(<2 x i64> %val0, <2 x i64> %val1)
@@ -87,10 +80,9 @@ define i64 @v_uadd_i64(i64 %val0, i64 %val1, ptr %ptrval) {
 ; CHECK-LABEL: v_uadd_i64:
 ; CHECK: ; %bb.0:
 ; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_add_co_u32_e32 v2, vcc, v0, v2
-; CHECK-NEXT: v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; CHECK-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
 ; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT: v_mov_b32_e32 v1, v0
 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -109,7 +101,6 @@ define i64 @v_uadd_p1(i64 %val0, i64 %val1, ptr %ptrval) {
 ; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CHECK-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
 ; CHECK-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; CHECK-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
 ; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT: v_mov_b32_e32 v1, v0
@@ -147,10 +138,9 @@ define i64 @v_usub_p1(i64 %val0, i64 %val1, ptr %ptrval) {
 ; CHECK-LABEL: v_usub_p1:
 ; CHECK: ; %bb.0:
 ; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_add_co_u32_e32 v2, vcc, -1, v0
-; CHECK-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v1, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT: v_subrev_co_u32_e32 v0, vcc, 1, v0
+; CHECK-NEXT: v_subbrev_co_u32_e32 v1, vcc, 0, v1, vcc
+; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
 ; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT: v_mov_b32_e32 v1, v0
 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -167,10 +157,9 @@ define i64 @v_usub_n1(i64 %val0, i64 %val1, ptr %ptrval) {
 ; CHECK-LABEL: v_usub_n1:
 ; CHECK: ; %bb.0:
 ; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_add_co_u32_e32 v2, vcc, 1, v0
-; CHECK-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT: v_subrev_co_u32_e32 v0, vcc, -1, v0
+; CHECK-NEXT: v_subbrev_co_u32_e32 v1, vcc, -1, v1, vcc
+; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
 ; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT: v_mov_b32_e32 v1, v0
 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -190,15 +179,13 @@ define i64 @v_usub_n1(i64 %val0, i64 %val1, ptr %ptrval) {
 define amdgpu_ps %struct.uint96 @s_add64_32(i64 inreg %val64A, i64 inreg %val64B, i32 inreg %val32) {
 ; CHECK-LABEL: s_add64_32:
 ; CHECK: ; %bb.0:
-; CHECK-NEXT: s_add_u32 s6, s0, s2
-; CHECK-NEXT: v_mov_b32_e32 v0, s0
-; CHECK-NEXT: s_addc_u32 s7, s1, s3
-; CHECK-NEXT: v_mov_b32_e32 v1, s1
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; CHECK-NEXT: s_mov_b32 s0, s6
-; CHECK-NEXT: s_cmp_lg_u64 vcc, 0
+; CHECK-NEXT: s_add_u32 s0, s0, s2
+; CHECK-NEXT: s_cselect_b64 s[6:7], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[6:7], 0
+; CHECK-NEXT: s_addc_u32 s1, s1, s3
+; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[2:3], 0
 ; CHECK-NEXT: s_addc_u32 s2, s4, 0
-; CHECK-NEXT: s_mov_b32 s1, s7
 ; CHECK-NEXT: ; return to shader part epilog
   %sum64 = add i64 %val64A, %val64B
   %obit = icmp ult i64 %sum64, %val64A
@@ -212,24 +199,24 @@ define amdgpu_ps %struct.uint96 @s_add64_32(i64 inreg %val64A, i64 inreg %val64B
 define amdgpu_ps <2 x i64> @s_uadd_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_uadd_v2i64:
 ; CHECK: ; %bb.0:
-; CHECK-NEXT: s_add_u32 s6, s2, s6
-; CHECK-NEXT: v_mov_b32_e32 v9, s3
-; CHECK-NEXT: s_addc_u32 s7, s3, s7
-; CHECK-NEXT: v_mov_b32_e32 v8, s2
-; CHECK-NEXT: s_add_u32 s4, s0, s4
-; CHECK-NEXT: v_mov_b32_e32 v7, s1
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[8:9]
-; CHECK-NEXT: s_addc_u32 s5, s1, s5
-; CHECK-NEXT: v_mov_b32_e32 v6, s0
-; CHECK-NEXT: v_cndmask_b32_e64 v8, 0, -1, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[6:7]
-; CHECK-NEXT: v_readfirstlane_b32 s2, v8
-; CHECK-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc
-; CHECK-NEXT: v_readfirstlane_b32 s0, v6
-; CHECK-NEXT: v_mov_b32_e32 v2, s4
-; CHECK-NEXT: v_mov_b32_e32 v3, s5
-; CHECK-NEXT: v_mov_b32_e32 v4, s6
-; CHECK-NEXT: v_mov_b32_e32 v5, s7
+; CHECK-NEXT: s_add_u32 s10, s2, s6
+; CHECK-NEXT: s_cselect_b64 s[8:9], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[8:9], 0
+; CHECK-NEXT: s_addc_u32 s8, s3, s7
+; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT: s_add_u32 s0, s0, s4
+; CHECK-NEXT: s_cselect_b64 s[6:7], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[6:7], 0
+; CHECK-NEXT: s_addc_u32 s1, s1, s5
+; CHECK-NEXT: v_mov_b32_e32 v2, s0
+; CHECK-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[2:3]
+; CHECK-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1]
+; CHECK-NEXT: v_readfirstlane_b32 s0, v7
+; CHECK-NEXT: v_readfirstlane_b32 s2, v6
+; CHECK-NEXT: v_mov_b32_e32 v4, s10
+; CHECK-NEXT: v_mov_b32_e32 v5, s8
 ; CHECK-NEXT: s_mov_b32 s1, s0
 ; CHECK-NEXT: s_mov_b32 s3, s2
 ; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
@@ -246,24 +233,24 @@ define amdgpu_ps <2 x i64> @s_uadd_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg
 define amdgpu_ps <2 x i64> @s_usub_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_usub_v2i64:
 ; CHECK: ; %bb.0:
-; CHECK-NEXT: s_sub_u32 s6, s2, s6
-; CHECK-NEXT: v_mov_b32_e32 v9, s3
-; CHECK-NEXT: s_subb_u32 s7, s3, s7
-; CHECK-NEXT: v_mov_b32_e32 v8, s2
-; CHECK-NEXT: s_sub_u32 s4, s0, s4
-; CHECK-NEXT: v_mov_b32_e32 v7, s1
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, s[6:7], v[8:9]
-; CHECK-NEXT: s_subb_u32 s5, s1, s5
-; CHECK-NEXT: v_mov_b32_e32 v6, s0
-; CHECK-NEXT: v_cndmask_b32_e64 v8, 0, -1, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, s[4:5], v[6:7]
-; CHECK-NEXT: v_readfirstlane_b32 s2, v8
-; CHECK-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc
-; CHECK-NEXT: v_readfirstlane_b32 s0, v6
-; CHECK-NEXT: v_mov_b32_e32 v2, s4
-; CHECK-NEXT: v_mov_b32_e32 v3, s5
-; CHECK-NEXT: v_mov_b32_e32 v4, s6
-; CHECK-NEXT: v_mov_b32_e32 v5, s7
+; CHECK-NEXT: s_sub_u32 s10, s2, s6
+; CHECK-NEXT: s_cselect_b64 s[8:9], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[8:9], 0
+; CHECK-NEXT: s_subb_u32 s8, s3, s7
+; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT: s_sub_u32 s0, s0, s4
+; CHECK-NEXT: s_cselect_b64 s[6:7], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[6:7], 0
+; CHECK-NEXT: s_subb_u32 s1, s1, s5
+; CHECK-NEXT: v_mov_b32_e32 v2, s0
+; CHECK-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[2:3]
+; CHECK-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1]
+; CHECK-NEXT: v_readfirstlane_b32 s0, v7
+; CHECK-NEXT: v_readfirstlane_b32 s2, v6
+; CHECK-NEXT: v_mov_b32_e32 v4, s10
+; CHECK-NEXT: v_mov_b32_e32 v5, s8
 ; CHECK-NEXT: s_mov_b32 s1, s0
 ; CHECK-NEXT: s_mov_b32 s3, s2
 ; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
@@ -280,15 +267,15 @@ define amdgpu_ps <2 x i64> @s_usub_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg
 define amdgpu_ps i64 @s_uadd_i64(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_uadd_i64:
 ; CHECK: ; %bb.0:
-; CHECK-NEXT: s_add_u32 s2, s0, s2
-; CHECK-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-NEXT: s_addc_u32 s3, s1, s3
+; CHECK-NEXT: s_add_u32 s0, s0, s2
+; CHECK-NEXT: s_cselect_b64 s[4:5], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[4:5], 0
+; CHECK-NEXT: s_addc_u32 s1, s1, s3
 ; CHECK-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-NEXT: v_mov_b32_e32 v5, s3
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; CHECK-NEXT: v_mov_b32_e32 v4, s2
-; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
 ; CHECK-NEXT: v_readfirstlane_b32 s0, v0
 ; CHECK-NEXT: s_mov_b32 s1, s0
 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -305,10 +292,11 @@ define amdgpu_ps i64 @s_uadd_p1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_uadd_p1:
 ; CHECK: ; %bb.0:
 ; CHECK-NEXT: s_add_u32 s0, s0, 1
+; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[2:3], 0
 ; CHECK-NEXT: s_addc_u32 s1, s1, 0
-; CHECK-NEXT: s_cmp_eq_u64 s[0:1], 0
-; CHECK-NEXT: v_mov_b32_e32 v3, s1
 ; CHECK-NEXT: v_mov_b32_e32 v2, s0
+; CHECK-NEXT: v_mov_b32_e32 v3, s1
 ; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
 ; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
 ; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
@@ -350,15 +338,15 @@ define amdgpu_ps i64 @s_uadd_n1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
 define amdgpu_ps i64 @s_usub_p1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_usub_p1:
 ; CHECK: ; %bb.0:
-; CHECK-NEXT: s_add_u32 s2, s0, -1
-; CHECK-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-NEXT: s_addc_u32 s3, s1, -1
+; CHECK-NEXT: s_sub_u32 s0, s0, 1
+; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[2:3], 0
+; CHECK-NEXT: s_subb_u32 s1, s1, 0
 ; CHECK-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-NEXT: v_mov_b32_e32 v5, s3
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
-; CHECK-NEXT: v_mov_b32_e32 v4, s2
-; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
 ; CHECK-NEXT: v_readfirstlane_b32 s0, v0
 ; CHECK-NEXT: s_mov_b32 s1, s0
 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -374,15 +362,15 @@ define amdgpu_ps i64 @s_usub_p1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
 define amdgpu_ps i64 @s_usub_n1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_usub_n1:
 ; CHECK: ; %bb.0:
-; CHECK-NEXT: s_add_u32 s2, s0, 1
-; CHECK-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-NEXT: s_addc_u32 s3, s1, 0
+; CHECK-NEXT: s_sub_u32 s0, s0, -1
+; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[2:3], 0
+; CHECK-NEXT: s_subb_u32 s1, s1, -1
 ; CHECK-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-NEXT: v_mov_b32_e32 v5, s3
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
-; CHECK-NEXT: v_mov_b32_e32 v4, s2
-; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
 ; CHECK-NEXT: v_readfirstlane_b32 s0, v0
 ; CHECK-NEXT: s_mov_b32 s1, s0
 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)