Diffstat (limited to 'llvm/test/CodeGen/AMDGPU')
30 files changed, 2845 insertions, 465 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll new file mode 100644 index 0000000..e117200 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll @@ -0,0 +1,612 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=hawaii < %s | FileCheck -check-prefix=GFX7 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=fiji < %s | FileCheck -check-prefix=GFX8 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX11 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX12 %s + +define i16 @s_add_i16(i16 inreg %a, i16 inreg %b) { +; GFX7-LABEL: s_add_i16: +; GFX7:       ; %bb.0: +; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT:    s_add_i32 s16, s16, s17 +; GFX7-NEXT:    v_mov_b32_e32 v0, s16 +; GFX7-NEXT:    s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_add_i16: +; GFX9:       ; %bb.0: +; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT:    s_add_i32 s16, s16, s17 +; GFX9-NEXT:    v_mov_b32_e32 v0, s16 +; GFX9-NEXT:    s_setpc_b64 s[30:31] +; +; GFX8-LABEL: s_add_i16: +; GFX8:       ; %bb.0: +; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT:    s_add_i32 s16, s16, s17 +; GFX8-NEXT:    v_mov_b32_e32 v0, s16 +; GFX8-NEXT:    s_setpc_b64 s[30:31] +; +; GFX10-LABEL: s_add_i16: +; GFX10:       ; %bb.0: +; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT:    s_add_i32 s16, s16, s17 +; GFX10-NEXT:    v_mov_b32_e32 v0, s16 +; GFX10-NEXT:    s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_add_i16: +; GFX11:       ; %bb.0: +; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT:    s_add_i32 s0, s0, s1 +; GFX11-NEXT:    v_mov_b32_e32 v0, s0 +; GFX11-NEXT:    s_setpc_b64 s[30:31] +; +; GFX12-LABEL: s_add_i16: +; GFX12:       ; %bb.0: +; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT:    s_wait_expcnt 0x0 +; GFX12-NEXT:    s_wait_samplecnt 0x0 +; GFX12-NEXT:    s_wait_bvhcnt 0x0 +; GFX12-NEXT:    s_wait_kmcnt 0x0 +; GFX12-NEXT:    s_add_co_i32 s0, s0, s1 +; GFX12-NEXT:    s_wait_alu 0xfffe +; GFX12-NEXT:    v_mov_b32_e32 v0, s0 +; GFX12-NEXT:    s_setpc_b64 s[30:31] +  %c = add i16 %a, %b +  ret i16 %c +} + +define i16 @v_add_i16(i16 %a, i16 %b) { +; GFX7-LABEL: v_add_i16: +; GFX7:       ; %bb.0: +; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1 +; GFX7-NEXT:    s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_add_i16: +; GFX9:       ; %bb.0: +; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT:    v_add_u16_e32 v0, v0, v1 +; GFX9-NEXT:    s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_add_i16: +; GFX8:       ; %bb.0: +; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT:    v_add_u16_e32 v0, v0, v1 +; GFX8-NEXT:    s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_add_i16: +; GFX10:       ; %bb.0: +; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT:    v_add_nc_u16 v0, v0, v1 +; 
GFX10-NEXT:    s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_add_i16: +; GFX11:       ; %bb.0: +; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT:    v_add_nc_u16 v0.l, v0.l, v1.l +; GFX11-NEXT:    s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_add_i16: +; GFX12:       ; %bb.0: +; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT:    s_wait_expcnt 0x0 +; GFX12-NEXT:    s_wait_samplecnt 0x0 +; GFX12-NEXT:    s_wait_bvhcnt 0x0 +; GFX12-NEXT:    s_wait_kmcnt 0x0 +; GFX12-NEXT:    v_add_nc_u16 v0, v0, v1 +; GFX12-NEXT:    s_setpc_b64 s[30:31] +  %c = add i16 %a, %b +  ret i16 %c +} + +define i32 @s_add_i32(i32 inreg %a, i32 inreg %b) { +; GFX7-LABEL: s_add_i32: +; GFX7:       ; %bb.0: +; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT:    s_add_i32 s16, s16, s17 +; GFX7-NEXT:    v_mov_b32_e32 v0, s16 +; GFX7-NEXT:    s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_add_i32: +; GFX9:       ; %bb.0: +; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT:    s_add_i32 s16, s16, s17 +; GFX9-NEXT:    v_mov_b32_e32 v0, s16 +; GFX9-NEXT:    s_setpc_b64 s[30:31] +; +; GFX8-LABEL: s_add_i32: +; GFX8:       ; %bb.0: +; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT:    s_add_i32 s16, s16, s17 +; GFX8-NEXT:    v_mov_b32_e32 v0, s16 +; GFX8-NEXT:    s_setpc_b64 s[30:31] +; +; GFX10-LABEL: s_add_i32: +; GFX10:       ; %bb.0: +; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT:    s_add_i32 s16, s16, s17 +; GFX10-NEXT:    v_mov_b32_e32 v0, s16 +; GFX10-NEXT:    s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_add_i32: +; GFX11:       ; %bb.0: +; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT:    s_add_i32 s0, s0, s1 +; GFX11-NEXT:    v_mov_b32_e32 v0, s0 +; GFX11-NEXT:    s_setpc_b64 s[30:31] +; +; GFX12-LABEL: s_add_i32: +; GFX12:       ; %bb.0: +; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT:    s_wait_expcnt 0x0 +; GFX12-NEXT:    s_wait_samplecnt 0x0 +; GFX12-NEXT:    s_wait_bvhcnt 0x0 +; GFX12-NEXT:    s_wait_kmcnt 0x0 +; GFX12-NEXT:    s_add_co_i32 s0, s0, s1 +; GFX12-NEXT:    s_wait_alu 0xfffe +; GFX12-NEXT:    v_mov_b32_e32 v0, s0 +; GFX12-NEXT:    s_setpc_b64 s[30:31] +  %c = add i32 %a, %b +  ret i32 %c +} + +define i32 @v_add_i32(i32 %a, i32 %b) { +; GFX7-LABEL: v_add_i32: +; GFX7:       ; %bb.0: +; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1 +; GFX7-NEXT:    s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_add_i32: +; GFX9:       ; %bb.0: +; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT:    v_add_u32_e32 v0, v0, v1 +; GFX9-NEXT:    s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_add_i32: +; GFX8:       ; %bb.0: +; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v1 +; GFX8-NEXT:    s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_add_i32: +; GFX10:       ; %bb.0: +; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT:    v_add_nc_u32_e32 v0, v0, v1 +; GFX10-NEXT:    s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_add_i32: +; GFX11:       ; %bb.0: +; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT:    v_add_nc_u32_e32 v0, v0, v1 +; GFX11-NEXT:    s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_add_i32: +; GFX12:       ; %bb.0: +; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT:    s_wait_expcnt 0x0 +; GFX12-NEXT:    s_wait_samplecnt 0x0 +; GFX12-NEXT:    s_wait_bvhcnt 0x0 +; GFX12-NEXT:    s_wait_kmcnt 0x0 +; GFX12-NEXT:    v_add_nc_u32_e32 v0, v0, v1 +; 
GFX12-NEXT:    s_setpc_b64 s[30:31] +  %c = add i32 %a, %b +  ret i32 %c +} + +define <2 x i16> @s_add_v2i16(<2 x i16> inreg %a, <2 x i16> inreg %b) { +; GFX7-LABEL: s_add_v2i16: +; GFX7:       ; %bb.0: +; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT:    s_add_i32 s16, s16, s18 +; GFX7-NEXT:    s_add_i32 s17, s17, s19 +; GFX7-NEXT:    v_mov_b32_e32 v0, s16 +; GFX7-NEXT:    v_mov_b32_e32 v1, s17 +; GFX7-NEXT:    s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_add_v2i16: +; GFX9:       ; %bb.0: +; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT:    s_lshr_b32 s4, s16, 16 +; GFX9-NEXT:    s_lshr_b32 s5, s17, 16 +; GFX9-NEXT:    s_add_i32 s16, s16, s17 +; GFX9-NEXT:    s_add_i32 s4, s4, s5 +; GFX9-NEXT:    s_pack_ll_b32_b16 s4, s16, s4 +; GFX9-NEXT:    v_mov_b32_e32 v0, s4 +; GFX9-NEXT:    s_setpc_b64 s[30:31] +; +; GFX8-LABEL: s_add_v2i16: +; GFX8:       ; %bb.0: +; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT:    s_lshr_b32 s4, s16, 16 +; GFX8-NEXT:    s_lshr_b32 s5, s17, 16 +; GFX8-NEXT:    s_add_i32 s4, s4, s5 +; GFX8-NEXT:    s_add_i32 s16, s16, s17 +; GFX8-NEXT:    s_and_b32 s4, 0xffff, s4 +; GFX8-NEXT:    s_and_b32 s5, 0xffff, s16 +; GFX8-NEXT:    s_lshl_b32 s4, s4, 16 +; GFX8-NEXT:    s_or_b32 s4, s5, s4 +; GFX8-NEXT:    v_mov_b32_e32 v0, s4 +; GFX8-NEXT:    s_setpc_b64 s[30:31] +; +; GFX10-LABEL: s_add_v2i16: +; GFX10:       ; %bb.0: +; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT:    s_lshr_b32 s4, s16, 16 +; GFX10-NEXT:    s_lshr_b32 s5, s17, 16 +; GFX10-NEXT:    s_add_i32 s16, s16, s17 +; GFX10-NEXT:    s_add_i32 s4, s4, s5 +; GFX10-NEXT:    s_pack_ll_b32_b16 s4, s16, s4 +; GFX10-NEXT:    v_mov_b32_e32 v0, s4 +; GFX10-NEXT:    s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_add_v2i16: +; GFX11:       ; %bb.0: +; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT:    s_lshr_b32 s2, s0, 16 +; GFX11-NEXT:    s_lshr_b32 s3, s1, 16 +; GFX11-NEXT:    s_add_i32 s0, s0, s1 +; GFX11-NEXT:    s_add_i32 s2, s2, s3 +; GFX11-NEXT:    s_pack_ll_b32_b16 s0, s0, s2 +; GFX11-NEXT:    v_mov_b32_e32 v0, s0 +; GFX11-NEXT:    s_setpc_b64 s[30:31] +; +; GFX12-LABEL: s_add_v2i16: +; GFX12:       ; %bb.0: +; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT:    s_wait_expcnt 0x0 +; GFX12-NEXT:    s_wait_samplecnt 0x0 +; GFX12-NEXT:    s_wait_bvhcnt 0x0 +; GFX12-NEXT:    s_wait_kmcnt 0x0 +; GFX12-NEXT:    s_lshr_b32 s2, s0, 16 +; GFX12-NEXT:    s_lshr_b32 s3, s1, 16 +; GFX12-NEXT:    s_add_co_i32 s0, s0, s1 +; GFX12-NEXT:    s_wait_alu 0xfffe +; GFX12-NEXT:    s_add_co_i32 s2, s2, s3 +; GFX12-NEXT:    s_wait_alu 0xfffe +; GFX12-NEXT:    s_pack_ll_b32_b16 s0, s0, s2 +; GFX12-NEXT:    s_wait_alu 0xfffe +; GFX12-NEXT:    v_mov_b32_e32 v0, s0 +; GFX12-NEXT:    s_setpc_b64 s[30:31] +  %c = add <2 x i16> %a, %b +  ret <2 x i16> %c +} + +define <2 x i16> @v_add_v2i16(<2 x i16> %a, <2 x i16> %b) { +; GFX7-LABEL: v_add_v2i16: +; GFX7:       ; %bb.0: +; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v2 +; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v3 +; GFX7-NEXT:    s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_add_v2i16: +; GFX9:       ; %bb.0: +; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT:    v_pk_add_u16 v0, v0, v1 +; GFX9-NEXT:    s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_add_v2i16: +; GFX8:       ; %bb.0: +; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT:    v_add_u16_e32 v2, v0, v1 +; GFX8-NEXT:    v_add_u16_sdwa v0, v0, v1 
dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-NEXT:    v_or_b32_e32 v0, v2, v0 +; GFX8-NEXT:    s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_add_v2i16: +; GFX10:       ; %bb.0: +; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT:    v_pk_add_u16 v0, v0, v1 +; GFX10-NEXT:    s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_add_v2i16: +; GFX11:       ; %bb.0: +; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT:    v_pk_add_u16 v0, v0, v1 +; GFX11-NEXT:    s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_add_v2i16: +; GFX12:       ; %bb.0: +; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT:    s_wait_expcnt 0x0 +; GFX12-NEXT:    s_wait_samplecnt 0x0 +; GFX12-NEXT:    s_wait_bvhcnt 0x0 +; GFX12-NEXT:    s_wait_kmcnt 0x0 +; GFX12-NEXT:    v_pk_add_u16 v0, v0, v1 +; GFX12-NEXT:    s_setpc_b64 s[30:31] +  %c = add <2 x i16> %a, %b +  ret <2 x i16> %c +} + +define i64 @s_add_i64(i64 inreg %a, i64 inreg %b) { +; GFX7-LABEL: s_add_i64: +; GFX7:       ; %bb.0: +; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT:    s_add_u32 s4, s16, s18 +; GFX7-NEXT:    s_addc_u32 s5, s17, s19 +; GFX7-NEXT:    v_mov_b32_e32 v0, s4 +; GFX7-NEXT:    v_mov_b32_e32 v1, s5 +; GFX7-NEXT:    s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_add_i64: +; GFX9:       ; %bb.0: +; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT:    s_add_u32 s4, s16, s18 +; GFX9-NEXT:    s_addc_u32 s5, s17, s19 +; GFX9-NEXT:    v_mov_b32_e32 v0, s4 +; GFX9-NEXT:    v_mov_b32_e32 v1, s5 +; GFX9-NEXT:    s_setpc_b64 s[30:31] +; +; GFX8-LABEL: s_add_i64: +; GFX8:       ; %bb.0: +; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT:    s_add_u32 s4, s16, s18 +; GFX8-NEXT:    s_addc_u32 s5, s17, s19 +; GFX8-NEXT:    v_mov_b32_e32 v0, s4 +; GFX8-NEXT:    v_mov_b32_e32 v1, s5 +; GFX8-NEXT:    s_setpc_b64 s[30:31] +; +; GFX10-LABEL: s_add_i64: +; GFX10:       ; %bb.0: +; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT:    s_add_u32 s4, s16, s18 +; GFX10-NEXT:    s_addc_u32 s5, s17, s19 +; GFX10-NEXT:    v_mov_b32_e32 v0, s4 +; GFX10-NEXT:    v_mov_b32_e32 v1, s5 +; GFX10-NEXT:    s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_add_i64: +; GFX11:       ; %bb.0: +; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT:    s_add_u32 s0, s0, s2 +; GFX11-NEXT:    s_addc_u32 s1, s1, s3 +; GFX11-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GFX11-NEXT:    s_setpc_b64 s[30:31] +; +; GFX12-LABEL: s_add_i64: +; GFX12:       ; %bb.0: +; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT:    s_wait_expcnt 0x0 +; GFX12-NEXT:    s_wait_samplecnt 0x0 +; GFX12-NEXT:    s_wait_bvhcnt 0x0 +; GFX12-NEXT:    s_wait_kmcnt 0x0 +; GFX12-NEXT:    s_add_nc_u64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT:    s_wait_alu 0xfffe +; GFX12-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GFX12-NEXT:    s_setpc_b64 s[30:31] +  %c = add i64 %a, %b +  ret i64 %c +} + +define i64 @v_add_i64(i64 %a, i64 %b) { +; GFX7-LABEL: v_add_i64: +; GFX7:       ; %bb.0: +; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v2 +; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc +; GFX7-NEXT:    s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_add_i64: +; GFX9:       ; %bb.0: +; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2 +; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc +; GFX9-NEXT:    s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_add_i64: +; GFX8:   
    ; %bb.0: +; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v2 +; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc +; GFX8-NEXT:    s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_add_i64: +; GFX10:       ; %bb.0: +; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2 +; GFX10-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo +; GFX10-NEXT:    s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_add_i64: +; GFX11:       ; %bb.0: +; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2 +; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, null, v1, v3, vcc_lo +; GFX11-NEXT:    s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_add_i64: +; GFX12:       ; %bb.0: +; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT:    s_wait_expcnt 0x0 +; GFX12-NEXT:    s_wait_samplecnt 0x0 +; GFX12-NEXT:    s_wait_bvhcnt 0x0 +; GFX12-NEXT:    s_wait_kmcnt 0x0 +; GFX12-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2 +; GFX12-NEXT:    s_wait_alu 0xfffd +; GFX12-NEXT:    v_add_co_ci_u32_e64 v1, null, v1, v3, vcc_lo +; GFX12-NEXT:    s_setpc_b64 s[30:31] +  %c = add i64 %a, %b +  ret i64 %c +} + +define void @s_uaddo_uadde(i64 inreg %a, i64 inreg %b, ptr addrspace(1) %res, ptr addrspace(1) %carry) { +; GFX7-LABEL: s_uaddo_uadde: +; GFX7:       ; %bb.0: +; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT:    s_add_u32 s4, s16, s18 +; GFX7-NEXT:    s_addc_u32 s5, s17, s19 +; GFX7-NEXT:    v_mov_b32_e32 v4, s4 +; GFX7-NEXT:    s_mov_b32 s6, 0 +; GFX7-NEXT:    s_cselect_b32 s8, 1, 0 +; GFX7-NEXT:    v_mov_b32_e32 v5, s5 +; GFX7-NEXT:    s_mov_b32 s7, 0xf000 +; GFX7-NEXT:    s_mov_b64 s[4:5], 0 +; GFX7-NEXT:    buffer_store_dwordx2 v[4:5], v[0:1], s[4:7], 0 addr64 +; GFX7-NEXT:    v_mov_b32_e32 v0, s8 +; GFX7-NEXT:    buffer_store_dword v0, v[2:3], s[4:7], 0 addr64 +; GFX7-NEXT:    s_waitcnt vmcnt(0) +; GFX7-NEXT:    s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_uaddo_uadde: +; GFX9:       ; %bb.0: +; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT:    s_add_u32 s4, s16, s18 +; GFX9-NEXT:    s_addc_u32 s5, s17, s19 +; GFX9-NEXT:    v_mov_b32_e32 v4, s4 +; GFX9-NEXT:    s_cselect_b32 s6, 1, 0 +; GFX9-NEXT:    v_mov_b32_e32 v5, s5 +; GFX9-NEXT:    global_store_dwordx2 v[0:1], v[4:5], off +; GFX9-NEXT:    v_mov_b32_e32 v0, s6 +; GFX9-NEXT:    global_store_dword v[2:3], v0, off +; GFX9-NEXT:    s_waitcnt vmcnt(0) +; GFX9-NEXT:    s_setpc_b64 s[30:31] +; +; GFX8-LABEL: s_uaddo_uadde: +; GFX8:       ; %bb.0: +; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT:    s_add_u32 s4, s16, s18 +; GFX8-NEXT:    s_addc_u32 s5, s17, s19 +; GFX8-NEXT:    v_mov_b32_e32 v4, s4 +; GFX8-NEXT:    s_cselect_b32 s6, 1, 0 +; GFX8-NEXT:    v_mov_b32_e32 v5, s5 +; GFX8-NEXT:    flat_store_dwordx2 v[0:1], v[4:5] +; GFX8-NEXT:    v_mov_b32_e32 v0, s6 +; GFX8-NEXT:    flat_store_dword v[2:3], v0 +; GFX8-NEXT:    s_waitcnt vmcnt(0) +; GFX8-NEXT:    s_setpc_b64 s[30:31] +; +; GFX10-LABEL: s_uaddo_uadde: +; GFX10:       ; %bb.0: +; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT:    s_add_u32 s4, s16, s18 +; GFX10-NEXT:    s_addc_u32 s5, s17, s19 +; GFX10-NEXT:    s_cselect_b32 s6, 1, 0 +; GFX10-NEXT:    v_mov_b32_e32 v4, s4 +; GFX10-NEXT:    v_mov_b32_e32 v5, s5 +; GFX10-NEXT:    v_mov_b32_e32 v6, s6 +; GFX10-NEXT:    global_store_dwordx2 v[0:1], v[4:5], off +; GFX10-NEXT:    global_store_dword v[2:3], v6, off +; GFX10-NEXT:    s_setpc_b64 s[30:31] +; +; 
GFX11-LABEL: s_uaddo_uadde: +; GFX11:       ; %bb.0: +; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT:    s_add_u32 s0, s0, s2 +; GFX11-NEXT:    s_addc_u32 s1, s1, s3 +; GFX11-NEXT:    s_cselect_b32 s2, 1, 0 +; GFX11-NEXT:    v_dual_mov_b32 v5, s1 :: v_dual_mov_b32 v4, s0 +; GFX11-NEXT:    v_mov_b32_e32 v6, s2 +; GFX11-NEXT:    global_store_b64 v[0:1], v[4:5], off +; GFX11-NEXT:    global_store_b32 v[2:3], v6, off +; GFX11-NEXT:    s_setpc_b64 s[30:31] +; +; GFX12-LABEL: s_uaddo_uadde: +; GFX12:       ; %bb.0: +; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT:    s_wait_expcnt 0x0 +; GFX12-NEXT:    s_wait_samplecnt 0x0 +; GFX12-NEXT:    s_wait_bvhcnt 0x0 +; GFX12-NEXT:    s_wait_kmcnt 0x0 +; GFX12-NEXT:    s_add_co_u32 s0, s0, s2 +; GFX12-NEXT:    s_add_co_ci_u32 s1, s1, s3 +; GFX12-NEXT:    s_cselect_b32 s2, 1, 0 +; GFX12-NEXT:    s_wait_alu 0xfffe +; GFX12-NEXT:    v_dual_mov_b32 v5, s1 :: v_dual_mov_b32 v4, s0 +; GFX12-NEXT:    v_mov_b32_e32 v6, s2 +; GFX12-NEXT:    global_store_b64 v[0:1], v[4:5], off +; GFX12-NEXT:    global_store_b32 v[2:3], v6, off +; GFX12-NEXT:    s_setpc_b64 s[30:31] +  %uaddo = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) +  %add = extractvalue {i64, i1} %uaddo, 0 +  %of = extractvalue {i64, i1} %uaddo, 1 +  %of32 = select i1 %of, i32 1, i32 0 +  store i64 %add, ptr addrspace(1) %res +  store i32 %of32, ptr addrspace(1) %carry +  ret void +} + +define void @v_uaddo_uadde(i64 %a, i64 %b, ptr addrspace(1) %res, ptr addrspace(1) %carry) { +; GFX7-LABEL: v_uaddo_uadde: +; GFX7:       ; %bb.0: +; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v2 +; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc +; GFX7-NEXT:    s_mov_b32 s6, 0 +; GFX7-NEXT:    s_mov_b32 s7, 0xf000 +; GFX7-NEXT:    s_mov_b64 s[4:5], 0 +; GFX7-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc +; GFX7-NEXT:    buffer_store_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 +; GFX7-NEXT:    buffer_store_dword v2, v[6:7], s[4:7], 0 addr64 +; GFX7-NEXT:    s_waitcnt vmcnt(0) +; GFX7-NEXT:    s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_uaddo_uadde: +; GFX9:       ; %bb.0: +; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2 +; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc +; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc +; GFX9-NEXT:    global_store_dwordx2 v[4:5], v[0:1], off +; GFX9-NEXT:    global_store_dword v[6:7], v2, off +; GFX9-NEXT:    s_waitcnt vmcnt(0) +; GFX9-NEXT:    s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_uaddo_uadde: +; GFX8:       ; %bb.0: +; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v2 +; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc +; GFX8-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc +; GFX8-NEXT:    flat_store_dwordx2 v[4:5], v[0:1] +; GFX8-NEXT:    flat_store_dword v[6:7], v2 +; GFX8-NEXT:    s_waitcnt vmcnt(0) +; GFX8-NEXT:    s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_uaddo_uadde: +; GFX10:       ; %bb.0: +; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2 +; GFX10-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo +; GFX10-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo +; GFX10-NEXT:    global_store_dwordx2 v[4:5], v[0:1], off +; GFX10-NEXT:    global_store_dword v[6:7], v2, off +; GFX10-NEXT:    s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_uaddo_uadde: +; GFX11:       ; %bb.0: +; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; 
GFX11-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2 +; GFX11-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo +; GFX11-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo +; GFX11-NEXT:    global_store_b64 v[4:5], v[0:1], off +; GFX11-NEXT:    global_store_b32 v[6:7], v2, off +; GFX11-NEXT:    s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_uaddo_uadde: +; GFX12:       ; %bb.0: +; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT:    s_wait_expcnt 0x0 +; GFX12-NEXT:    s_wait_samplecnt 0x0 +; GFX12-NEXT:    s_wait_bvhcnt 0x0 +; GFX12-NEXT:    s_wait_kmcnt 0x0 +; GFX12-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2 +; GFX12-NEXT:    s_wait_alu 0xfffd +; GFX12-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo +; GFX12-NEXT:    s_wait_alu 0xfffd +; GFX12-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo +; GFX12-NEXT:    global_store_b64 v[4:5], v[0:1], off +; GFX12-NEXT:    global_store_b32 v[6:7], v2, off +; GFX12-NEXT:    s_setpc_b64 s[30:31] +  %uaddo = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) +  %add = extractvalue {i64, i1} %uaddo, 0 +  %of = extractvalue {i64, i1} %uaddo, 1 +  %of32 = select i1 %of, i32 1, i32 0 +  store i64 %add, ptr addrspace(1) %res +  store i32 %of32, ptr addrspace(1) %carry +  ret void +} + +declare {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy-scc-vcc.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy-scc-vcc.ll new file mode 100644 index 0000000..1a7ccf0 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy-scc-vcc.ll @@ -0,0 +1,66 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx700 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX7 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx803 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s + +define amdgpu_kernel void @fcmp_uniform_select(float %a, i32 %b, i32 %c, ptr addrspace(1) %out) { +; GFX7-LABEL: fcmp_uniform_select: +; GFX7:       ; %bb.0: +; GFX7-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x9 +; GFX7-NEXT:    s_load_dword s3, s[4:5], 0xb +; GFX7-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0xd +; GFX7-NEXT:    s_mov_b32 s2, -1 +; GFX7-NEXT:    s_waitcnt lgkmcnt(0) +; GFX7-NEXT:    v_cmp_eq_f32_e64 s[4:5], s6, 0 +; GFX7-NEXT:    s_or_b64 s[4:5], s[4:5], s[4:5] +; GFX7-NEXT:    s_cselect_b32 s4, 1, 0 +; GFX7-NEXT:    s_and_b32 s4, s4, 1 +; GFX7-NEXT:    s_cmp_lg_u32 s4, 0 +; GFX7-NEXT:    s_cselect_b32 s3, s7, s3 +; GFX7-NEXT:    v_mov_b32_e32 v0, s3 +; GFX7-NEXT:    s_mov_b32 s3, 0xf000 +; GFX7-NEXT:    buffer_store_dword v0, off, s[0:3], 0 +; GFX7-NEXT:    s_endpgm +; +; GFX8-LABEL: fcmp_uniform_select: +; GFX8:       ; %bb.0: +; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24 +; GFX8-NEXT:    s_load_dword s6, s[4:5], 0x2c +; GFX8-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x34 +; GFX8-NEXT:    s_waitcnt lgkmcnt(0) +; GFX8-NEXT:    v_cmp_eq_f32_e64 s[4:5], s0, 0 +; GFX8-NEXT:    s_cmp_lg_u64 s[4:5], 0 +; GFX8-NEXT:    s_cselect_b32 s0, 1, 0 +; GFX8-NEXT:    s_and_b32 s0, s0, 1 +; GFX8-NEXT:    s_cmp_lg_u32 s0, 0 +; GFX8-NEXT:    s_cselect_b32 s0, s1, s6 +; GFX8-NEXT:    v_mov_b32_e32 v0, s2 +; GFX8-NEXT:    v_mov_b32_e32 v2, s0 +; GFX8-NEXT:    v_mov_b32_e32 v1, s3 +; GFX8-NEXT:    flat_store_dword v[0:1], 
v2 +; GFX8-NEXT:    s_endpgm +; +; GFX11-LABEL: fcmp_uniform_select: +; GFX11:       ; %bb.0: +; GFX11-NEXT:    s_clause 0x2 +; GFX11-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24 +; GFX11-NEXT:    s_load_b32 s6, s[4:5], 0x2c +; GFX11-NEXT:    s_load_b64 s[2:3], s[4:5], 0x34 +; GFX11-NEXT:    v_mov_b32_e32 v1, 0 +; GFX11-NEXT:    s_waitcnt lgkmcnt(0) +; GFX11-NEXT:    v_cmp_eq_f32_e64 s0, s0, 0 +; GFX11-NEXT:    s_cmp_lg_u32 s0, 0 +; GFX11-NEXT:    s_cselect_b32 s0, 1, 0 +; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX11-NEXT:    s_and_b32 s0, s0, 1 +; GFX11-NEXT:    s_cmp_lg_u32 s0, 0 +; GFX11-NEXT:    s_cselect_b32 s0, s1, s6 +; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT:    v_mov_b32_e32 v0, s0 +; GFX11-NEXT:    global_store_b32 v1, v0, s[2:3] +; GFX11-NEXT:    s_endpgm +  %cmp = fcmp oeq float %a, 0.0 +  %sel = select i1 %cmp, i32 %b, i32 %c +  store i32 %sel, ptr addrspace(1) %out +  ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy-scc-vcc.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy-scc-vcc.mir new file mode 100644 index 0000000..67cc016 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-copy-scc-vcc.mir @@ -0,0 +1,37 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=amdgcn -mcpu=gfx700 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck -check-prefixes=GFX7 %s +# RUN: llc -mtriple=amdgcn -mcpu=gfx803 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck -check-prefixes=GFX8 %s +# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck -check-prefixes=GFX11 %s + +--- +name: test_copy_scc_vcc +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | +  bb.0: +    ; GFX7-LABEL: name: test_copy_scc_vcc +    ; GFX7: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF +    ; GFX7-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64 = S_OR_B64 [[DEF]], [[DEF]], implicit-def $scc +    ; GFX7-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $scc +    ; GFX7-NEXT: $sgpr0 = COPY [[COPY]] +    ; GFX7-NEXT: S_ENDPGM 0, implicit $sgpr0 +    ; +    ; GFX8-LABEL: name: test_copy_scc_vcc +    ; GFX8: [[DEF:%[0-9]+]]:sreg_64_xexec = IMPLICIT_DEF +    ; GFX8-NEXT: S_CMP_LG_U64 [[DEF]], 0, implicit-def $scc +    ; GFX8-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $scc +    ; GFX8-NEXT: $sgpr0 = COPY [[COPY]] +    ; GFX8-NEXT: S_ENDPGM 0, implicit $sgpr0 +    ; +    ; GFX11-LABEL: name: test_copy_scc_vcc +    ; GFX11: [[DEF:%[0-9]+]]:sreg_32_xm0_xexec = IMPLICIT_DEF +    ; GFX11-NEXT: S_CMP_LG_U32 [[DEF]], 0, implicit-def $scc +    ; GFX11-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $scc +    ; GFX11-NEXT: $sgpr0 = COPY [[COPY]] +    ; GFX11-NEXT: S_ENDPGM 0, implicit $sgpr0 +    %0:vcc(s1) = G_IMPLICIT_DEF +    %1:sgpr(s32) = G_AMDGPU_COPY_SCC_VCC %0 +    $sgpr0 = COPY %1 +    S_ENDPGM 0, implicit $sgpr0 +... 
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll index 7714c03..d3e2118 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll @@ -113,9 +113,9 @@ false:  define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_non_compare(i32 inreg %v) {  ; CHECK-LABEL: branch_uniform_ballot_ne_zero_non_compare:  ; CHECK:       ; %bb.0: -; CHECK-NEXT:    s_and_b32 s0, 1, s0 -; CHECK-NEXT:    v_cmp_ne_u32_e64 s0, 0, s0 -; CHECK-NEXT:    s_cmp_eq_u32 s0, 0 +; CHECK-NEXT:    s_xor_b32 s0, s0, 1 +; CHECK-NEXT:    s_and_b32 s0, s0, 1 +; CHECK-NEXT:    s_cmp_lg_u32 s0, 0  ; CHECK-NEXT:    s_cbranch_scc1 .LBB8_2  ; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42 @@ -161,16 +161,17 @@ false:  define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_non_compare(i32 inreg %v) {  ; CHECK-LABEL: branch_uniform_ballot_eq_zero_non_compare:  ; CHECK:       ; %bb.0: -; CHECK-NEXT:    s_and_b32 s0, 1, s0 -; CHECK-NEXT:    v_cmp_ne_u32_e64 s0, 0, s0 +; CHECK-NEXT:    s_xor_b32 s0, s0, 1 +; CHECK-NEXT:    s_xor_b32 s0, s0, 1 +; CHECK-NEXT:    s_and_b32 s0, s0, 1  ; CHECK-NEXT:    s_cmp_lg_u32 s0, 0 -; CHECK-NEXT:    s_cbranch_scc0 .LBB10_2 -; CHECK-NEXT:  ; %bb.1: ; %false -; CHECK-NEXT:    s_mov_b32 s0, 33 -; CHECK-NEXT:    s_branch .LBB10_3 -; CHECK-NEXT:  .LBB10_2: ; %true +; CHECK-NEXT:    s_cbranch_scc1 .LBB10_2 +; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42  ; CHECK-NEXT:    s_branch .LBB10_3 +; CHECK-NEXT:  .LBB10_2: ; %false +; CHECK-NEXT:    s_mov_b32 s0, 33 +; CHECK-NEXT:    s_branch .LBB10_3  ; CHECK-NEXT:  .LBB10_3:    %c = trunc i32 %v to i1    %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c) @@ -208,11 +209,7 @@ false:  define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_compare(i32 inreg %v) {  ; CHECK-LABEL: branch_uniform_ballot_ne_zero_compare:  ; CHECK:       ; %bb.0: -; CHECK-NEXT:    s_cmp_lt_u32 s0, 12 -; CHECK-NEXT:    s_cselect_b32 s0, 1, 0 -; CHECK-NEXT:    s_and_b32 s0, 1, s0 -; CHECK-NEXT:    v_cmp_ne_u32_e64 s0, 0, s0 -; CHECK-NEXT:    s_cmp_eq_u32 s0, 0 +; CHECK-NEXT:    s_cmp_ge_u32 s0, 12  ; CHECK-NEXT:    s_cbranch_scc1 .LBB12_2  ; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42 @@ -258,17 +255,13 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_compare(i32 inreg %v) {  ; CHECK-LABEL: branch_uniform_ballot_eq_zero_compare:  ; CHECK:       ; %bb.0:  ; CHECK-NEXT:    s_cmp_lt_u32 s0, 12 -; CHECK-NEXT:    s_cselect_b32 s0, 1, 0 -; CHECK-NEXT:    s_and_b32 s0, 1, s0 -; CHECK-NEXT:    v_cmp_ne_u32_e64 s0, 0, s0 -; CHECK-NEXT:    s_cmp_lg_u32 s0, 0 -; CHECK-NEXT:    s_cbranch_scc0 .LBB14_2 -; CHECK-NEXT:  ; %bb.1: ; %false -; CHECK-NEXT:    s_mov_b32 s0, 33 -; CHECK-NEXT:    s_branch .LBB14_3 -; CHECK-NEXT:  .LBB14_2: ; %true +; CHECK-NEXT:    s_cbranch_scc1 .LBB14_2 +; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42  ; CHECK-NEXT:    s_branch .LBB14_3 +; CHECK-NEXT:  .LBB14_2: ; %false +; CHECK-NEXT:    s_mov_b32 s0, 33 +; CHECK-NEXT:    s_branch .LBB14_3  ; CHECK-NEXT:  .LBB14_3:    %c = icmp ult i32 %v, 12    %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c) @@ -310,14 +303,12 @@ false:  define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_and(i32 inreg %v1, i32 inreg %v2) {  ; CHECK-LABEL: branch_uniform_ballot_ne_zero_and:  ; CHECK:       ; %bb.0: -; CHECK-NEXT:    s_cmp_lt_u32 s0, 12 +; CHECK-NEXT:    s_cmp_ge_u32 s0, 12  ; CHECK-NEXT:    
s_cselect_b32 s0, 1, 0 -; CHECK-NEXT:    s_cmp_gt_u32 s1, 34 +; CHECK-NEXT:    s_cmp_le_u32 s1, 34  ; CHECK-NEXT:    s_cselect_b32 s1, 1, 0 -; CHECK-NEXT:    s_and_b32 s0, s0, s1 -; CHECK-NEXT:    s_and_b32 s0, 1, s0 -; CHECK-NEXT:    v_cmp_ne_u32_e64 s0, 0, s0 -; CHECK-NEXT:    s_cmp_eq_u32 s0, 0 +; CHECK-NEXT:    s_or_b32 s0, s0, s1 +; CHECK-NEXT:    s_cmp_lg_u32 s0, 0  ; CHECK-NEXT:    s_cbranch_scc1 .LBB16_2  ; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42 @@ -372,16 +363,14 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_and(i32 inreg %v1, i32 inreg  ; CHECK-NEXT:    s_cmp_gt_u32 s1, 34  ; CHECK-NEXT:    s_cselect_b32 s1, 1, 0  ; CHECK-NEXT:    s_and_b32 s0, s0, s1 -; CHECK-NEXT:    s_and_b32 s0, 1, s0 -; CHECK-NEXT:    v_cmp_ne_u32_e64 s0, 0, s0  ; CHECK-NEXT:    s_cmp_lg_u32 s0, 0 -; CHECK-NEXT:    s_cbranch_scc0 .LBB18_2 -; CHECK-NEXT:  ; %bb.1: ; %false -; CHECK-NEXT:    s_mov_b32 s0, 33 -; CHECK-NEXT:    s_branch .LBB18_3 -; CHECK-NEXT:  .LBB18_2: ; %true +; CHECK-NEXT:    s_cbranch_scc1 .LBB18_2 +; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42  ; CHECK-NEXT:    s_branch .LBB18_3 +; CHECK-NEXT:  .LBB18_2: ; %false +; CHECK-NEXT:    s_mov_b32 s0, 33 +; CHECK-NEXT:    s_branch .LBB18_3  ; CHECK-NEXT:  .LBB18_3:    %v1c = icmp ult i32 %v1, 12    %v2c = icmp ugt i32 %v2, 34 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll index 7b81669..250fbc7 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll @@ -116,9 +116,9 @@ false:  define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_non_compare(i32 inreg %v) {  ; CHECK-LABEL: branch_uniform_ballot_ne_zero_non_compare:  ; CHECK:       ; %bb.0: -; CHECK-NEXT:    s_and_b32 s0, 1, s0 -; CHECK-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, s0 -; CHECK-NEXT:    s_cmp_eq_u64 s[0:1], 0 +; CHECK-NEXT:    s_xor_b32 s0, s0, 1 +; CHECK-NEXT:    s_and_b32 s0, s0, 1 +; CHECK-NEXT:    s_cmp_lg_u32 s0, 0  ; CHECK-NEXT:    s_cbranch_scc1 .LBB8_2  ; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42 @@ -164,16 +164,17 @@ false:  define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_non_compare(i32 inreg %v) {  ; CHECK-LABEL: branch_uniform_ballot_eq_zero_non_compare:  ; CHECK:       ; %bb.0: -; CHECK-NEXT:    s_and_b32 s0, 1, s0 -; CHECK-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, s0 -; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0 -; CHECK-NEXT:    s_cbranch_scc0 .LBB10_2 -; CHECK-NEXT:  ; %bb.1: ; %false -; CHECK-NEXT:    s_mov_b32 s0, 33 -; CHECK-NEXT:    s_branch .LBB10_3 -; CHECK-NEXT:  .LBB10_2: ; %true +; CHECK-NEXT:    s_xor_b32 s0, s0, 1 +; CHECK-NEXT:    s_xor_b32 s0, s0, 1 +; CHECK-NEXT:    s_and_b32 s0, s0, 1 +; CHECK-NEXT:    s_cmp_lg_u32 s0, 0 +; CHECK-NEXT:    s_cbranch_scc1 .LBB10_2 +; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42  ; CHECK-NEXT:    s_branch .LBB10_3 +; CHECK-NEXT:  .LBB10_2: ; %false +; CHECK-NEXT:    s_mov_b32 s0, 33 +; CHECK-NEXT:    s_branch .LBB10_3  ; CHECK-NEXT:  .LBB10_3:    %c = trunc i32 %v to i1    %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c) @@ -211,11 +212,7 @@ false:  define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_compare(i32 inreg %v) {  ; CHECK-LABEL: branch_uniform_ballot_ne_zero_compare:  ; CHECK:       ; %bb.0: -; CHECK-NEXT:    s_cmp_lt_u32 s0, 12 -; CHECK-NEXT:    s_cselect_b32 s0, 1, 0 -; CHECK-NEXT:    s_and_b32 s0, 1, s0 -; CHECK-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, 
s0 -; CHECK-NEXT:    s_cmp_eq_u64 s[0:1], 0 +; CHECK-NEXT:    s_cmp_ge_u32 s0, 12  ; CHECK-NEXT:    s_cbranch_scc1 .LBB12_2  ; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42 @@ -261,17 +258,13 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_compare(i32 inreg %v) {  ; CHECK-LABEL: branch_uniform_ballot_eq_zero_compare:  ; CHECK:       ; %bb.0:  ; CHECK-NEXT:    s_cmp_lt_u32 s0, 12 -; CHECK-NEXT:    s_cselect_b32 s0, 1, 0 -; CHECK-NEXT:    s_and_b32 s0, 1, s0 -; CHECK-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, s0 -; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0 -; CHECK-NEXT:    s_cbranch_scc0 .LBB14_2 -; CHECK-NEXT:  ; %bb.1: ; %false -; CHECK-NEXT:    s_mov_b32 s0, 33 -; CHECK-NEXT:    s_branch .LBB14_3 -; CHECK-NEXT:  .LBB14_2: ; %true +; CHECK-NEXT:    s_cbranch_scc1 .LBB14_2 +; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42  ; CHECK-NEXT:    s_branch .LBB14_3 +; CHECK-NEXT:  .LBB14_2: ; %false +; CHECK-NEXT:    s_mov_b32 s0, 33 +; CHECK-NEXT:    s_branch .LBB14_3  ; CHECK-NEXT:  .LBB14_3:    %c = icmp ult i32 %v, 12    %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c) @@ -313,14 +306,12 @@ false:  define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_and(i32 inreg %v1, i32 inreg %v2) {  ; CHECK-LABEL: branch_uniform_ballot_ne_zero_and:  ; CHECK:       ; %bb.0: -; CHECK-NEXT:    s_cmp_lt_u32 s0, 12 +; CHECK-NEXT:    s_cmp_ge_u32 s0, 12  ; CHECK-NEXT:    s_cselect_b32 s0, 1, 0 -; CHECK-NEXT:    s_cmp_gt_u32 s1, 34 +; CHECK-NEXT:    s_cmp_le_u32 s1, 34  ; CHECK-NEXT:    s_cselect_b32 s1, 1, 0 -; CHECK-NEXT:    s_and_b32 s0, s0, s1 -; CHECK-NEXT:    s_and_b32 s0, 1, s0 -; CHECK-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, s0 -; CHECK-NEXT:    s_cmp_eq_u64 s[0:1], 0 +; CHECK-NEXT:    s_or_b32 s0, s0, s1 +; CHECK-NEXT:    s_cmp_lg_u32 s0, 0  ; CHECK-NEXT:    s_cbranch_scc1 .LBB16_2  ; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42 @@ -375,16 +366,14 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_and(i32 inreg %v1, i32 inreg  ; CHECK-NEXT:    s_cmp_gt_u32 s1, 34  ; CHECK-NEXT:    s_cselect_b32 s1, 1, 0  ; CHECK-NEXT:    s_and_b32 s0, s0, s1 -; CHECK-NEXT:    s_and_b32 s0, 1, s0 -; CHECK-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, s0 -; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0 -; CHECK-NEXT:    s_cbranch_scc0 .LBB18_2 -; CHECK-NEXT:  ; %bb.1: ; %false -; CHECK-NEXT:    s_mov_b32 s0, 33 -; CHECK-NEXT:    s_branch .LBB18_3 -; CHECK-NEXT:  .LBB18_2: ; %true +; CHECK-NEXT:    s_cmp_lg_u32 s0, 0 +; CHECK-NEXT:    s_cbranch_scc1 .LBB18_2 +; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42  ; CHECK-NEXT:    s_branch .LBB18_3 +; CHECK-NEXT:  .LBB18_2: ; %false +; CHECK-NEXT:    s_mov_b32 s0, 33 +; CHECK-NEXT:    s_branch .LBB18_3  ; CHECK-NEXT:  .LBB18_3:    %v1c = icmp ult i32 %v1, 12    %v2c = icmp ugt i32 %v2, 34 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.mir new file mode 100644 index 0000000..097372a --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.mir @@ -0,0 +1,524 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=amdgcn -mcpu=fiji -run-pass=amdgpu-regbankselect,amdgpu-regbanklegalize %s -verify-machineinstrs -o - | FileCheck %s +--- +name: add_s16_ss +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0, $sgpr1 +    ; CHECK-LABEL: name: add_s16_ss +    ; CHECK: liveins: $sgpr0, $sgpr1 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 +    ; 
CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 +    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32) +    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY1]](s32) +    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:sgpr(s32) = G_ANYEXT [[TRUNC]](s16) +    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:sgpr(s32) = G_ANYEXT [[TRUNC1]](s16) +    ; CHECK-NEXT: [[ADD:%[0-9]+]]:sgpr(s32) = G_ADD [[ANYEXT]], [[ANYEXT1]] +    ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:sgpr(s16) = G_TRUNC [[ADD]](s32) +    ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s16) = G_AND [[TRUNC2]], [[TRUNC2]] +    %0:_(s32) = COPY $sgpr0 +    %1:_(s32) = COPY $sgpr1 +    %2:_(s16) = G_TRUNC %0 +    %3:_(s16) = G_TRUNC %1 +    %4:_(s16) = G_ADD %2, %3 +    %5:_(s16) = G_AND %4, %4 +... + +--- +name: add_s16_sv +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0, $vgpr0 +    ; CHECK-LABEL: name: add_s16_sv +    ; CHECK: liveins: $sgpr0, $vgpr0 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 +    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32) +    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32) +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC]](s16) +    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s16) = G_ADD [[COPY2]], [[TRUNC1]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[ADD]], [[ADD]] +    %0:_(s32) = COPY $sgpr0 +    %1:_(s32) = COPY $vgpr0 +    %2:_(s16) = G_TRUNC %0 +    %3:_(s16) = G_TRUNC %1 +    %4:_(s16) = G_ADD %2, %3 +    %5:_(s16) = G_AND %4, %4 +... + +--- +name: add_s16_vs +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0, $vgpr0 +    ; CHECK-LABEL: name: add_s16_vs +    ; CHECK: liveins: $sgpr0, $vgpr0 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 +    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32) +    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY1]](s32) +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC1]](s16) +    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s16) = G_ADD [[TRUNC]], [[COPY2]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[ADD]], [[ADD]] +    %0:_(s32) = COPY $vgpr0 +    %1:_(s32) = COPY $sgpr0 +    %2:_(s16) = G_TRUNC %0 +    %3:_(s16) = G_TRUNC %1 +    %4:_(s16) = G_ADD %2, %3 +    %5:_(s16) = G_AND %4, %4 +... + +--- +name: add_s16_vv +legalized: true + +body: | +  bb.0: +    liveins: $vgpr0, $vgpr1 +    ; CHECK-LABEL: name: add_s16_vv +    ; CHECK: liveins: $vgpr0, $vgpr1 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 +    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32) +    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32) +    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s16) = G_ADD [[TRUNC]], [[TRUNC1]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[ADD]], [[ADD]] +    %0:_(s32) = COPY $vgpr0 +    %1:_(s32) = COPY $vgpr1 +    %2:_(s16) = G_TRUNC %0 +    %3:_(s16) = G_TRUNC %1 +    %4:_(s16) = G_ADD %2, %3 +    %5:_(s16) = G_AND %4, %4 +... 
+ +--- +name: add_s32_ss +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0, $sgpr1 +    ; CHECK-LABEL: name: add_s32_ss +    ; CHECK: liveins: $sgpr0, $sgpr1 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 +    ; CHECK-NEXT: [[ADD:%[0-9]+]]:sgpr(s32) = G_ADD [[COPY]], [[COPY1]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[ADD]], [[ADD]] +    %0:_(s32) = COPY $sgpr0 +    %1:_(s32) = COPY $sgpr1 +    %2:_(s32) = G_ADD %0, %1 +    %3:_(s32) = G_AND %2, %2 +... + +--- +name: add_s32_sv +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0, $vgpr0 +    ; CHECK-LABEL: name: add_s32_sv +    ; CHECK: liveins: $sgpr0, $vgpr0 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32) +    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[COPY2]], [[COPY1]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[ADD]], [[ADD]] +    %0:_(s32) = COPY $sgpr0 +    %1:_(s32) = COPY $vgpr0 +    %2:_(s32) = G_ADD %0, %1 +    %3:_(s32) = G_AND %2, %2 +... + +--- +name: add_s32_vs +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0, $vgpr0 +    ; CHECK-LABEL: name: add_s32_vs +    ; CHECK: liveins: $sgpr0, $vgpr0 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32) +    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[COPY]], [[COPY2]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[ADD]], [[ADD]] +    %0:_(s32) = COPY $vgpr0 +    %1:_(s32) = COPY $sgpr0 +    %2:_(s32) = G_ADD %0, %1 +    %3:_(s32) = G_AND %2, %2 +... + +--- +name: add_s32_vv +legalized: true + +body: | +  bb.0: +    liveins: $vgpr0, $vgpr1 +    ; CHECK-LABEL: name: add_s32_vv +    ; CHECK: liveins: $vgpr0, $vgpr1 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 +    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[COPY]], [[COPY1]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[ADD]], [[ADD]] +    %0:_(s32) = COPY $vgpr0 +    %1:_(s32) = COPY $vgpr1 +    %2:_(s32) = G_ADD %0, %1 +    %3:_(s32) = G_AND %2, %2 +... + +--- +name: add_s64_ss +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0_sgpr1, $sgpr2_sgpr3 +    ; CHECK-LABEL: name: add_s64_ss +    ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s64) = COPY $sgpr2_sgpr3 +    ; CHECK-NEXT: [[ADD:%[0-9]+]]:sgpr(s64) = G_ADD [[COPY]], [[COPY1]] +    ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 255 +    ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s64) = G_AND [[ADD]], [[ADD]] +    %0:_(s64) = COPY $sgpr0_sgpr1 +    %1:_(s64) = COPY $sgpr2_sgpr3 +    %2:_(s64) = G_ADD %0, %1 +    %3:_(s64) = G_CONSTANT i64 255 +    %4:_(s64) = G_AND %2, %2 +... 
+ +--- +name: add_s64_sv +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0_sgpr1, $vgpr0_vgpr1 +    ; CHECK-LABEL: name: add_s64_sv +    ; CHECK: liveins: $sgpr0_sgpr1, $vgpr0_vgpr1 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s64) = COPY [[COPY]](s64) +    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s64) = G_ADD [[COPY2]], [[COPY1]] +    ; CHECK-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ADD]](s64) +    ; CHECK-NEXT: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ADD]](s64) +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[UV2]] +    ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[UV3]] +    ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32) +    %0:_(s64) = COPY $sgpr0_sgpr1 +    %1:_(s64) = COPY $vgpr0_vgpr1 +    %2:_(s64) = G_ADD %0, %1 +    %3:_(s64) = G_AND %2, %2 +... + +--- +name: add_s64_vs +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0_sgpr1, $vgpr0_vgpr1 +    ; CHECK-LABEL: name: add_s64_vs +    ; CHECK: liveins: $sgpr0_sgpr1, $vgpr0_vgpr1 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1 +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s64) = COPY [[COPY1]](s64) +    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s64) = G_ADD [[COPY]], [[COPY2]] +    ; CHECK-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ADD]](s64) +    ; CHECK-NEXT: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ADD]](s64) +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[UV2]] +    ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[UV3]] +    ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32) +    %0:_(s64) = COPY $vgpr0_vgpr1 +    %1:_(s64) = COPY $sgpr0_sgpr1 +    %2:_(s64) = G_ADD %0, %1 +    %3:_(s64) = G_AND %2, %2 +... + +--- +name: add_s64_vv +legalized: true + +body: | +  bb.0: +    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3 +    ; CHECK-LABEL: name: add_s64_vv +    ; CHECK: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr2_vgpr3 +    ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s64) = G_ADD [[COPY]], [[COPY1]] +    ; CHECK-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ADD]](s64) +    ; CHECK-NEXT: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ADD]](s64) +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[UV2]] +    ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[UV3]] +    ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32) +    %0:_(s64) = COPY $vgpr0_vgpr1 +    %1:_(s64) = COPY $vgpr2_vgpr3 +    %2:_(s64) = G_ADD %0, %1 +    %3:_(s64) = G_AND %2, %2 +... 
+ +--- +name: uaddo_s32_ss +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0, $sgpr1 +    ; CHECK-LABEL: name: uaddo_s32_ss +    ; CHECK: liveins: $sgpr0, $sgpr1 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 +    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:sgpr(s32), [[UADDO1:%[0-9]+]]:sgpr(s32) = G_UADDO [[COPY]], [[COPY1]] +    ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1 +    ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[UADDO1]], [[C]] +    ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 +    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[AND]](s32), [[C]], [[C1]] +    ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s32) = G_AND [[SELECT]], [[UADDO]] +    %0:_(s32) = COPY $sgpr0 +    %1:_(s32) = COPY $sgpr1 +    %2:_(s32), %3:_(s1) = G_UADDO %0, %1 +    %4:_(s32) = G_ZEXT %3 +    %5:_(s32) = G_AND %4, %2 +... + +--- +name: uaddo_s32_sv +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0, $vgpr1 +    ; CHECK-LABEL: name: uaddo_s32_sv +    ; CHECK: liveins: $sgpr0, $vgpr1 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32) +    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:vgpr(s32), [[UADDO1:%[0-9]+]]:vcc(s1) = G_UADDO [[COPY2]], [[COPY1]] +    ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1 +    ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 +    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[UADDO1]](s1), [[C]], [[C1]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UADDO]], [[SELECT]] +    %0:_(s32) = COPY $sgpr0 +    %1:_(s32) = COPY $vgpr1 +    %2:_(s32), %3:_(s1) = G_UADDO %0, %1 +    %4:_(s32) = G_ZEXT %3 +    %5:_(s32) = G_AND %2, %4 +... + +--- +name: uaddo_s32_vs +legalized: true + +body: | +  bb.0: +    liveins: $vgpr0, $sgpr1 +    ; CHECK-LABEL: name: uaddo_s32_vs +    ; CHECK: liveins: $vgpr0, $sgpr1 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32) +    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:vgpr(s32), [[UADDO1:%[0-9]+]]:vcc(s1) = G_UADDO [[COPY]], [[COPY2]] +    ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1 +    ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 +    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[UADDO1]](s1), [[C]], [[C1]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UADDO]], [[SELECT]] +    %0:_(s32) = COPY $vgpr0 +    %1:_(s32) = COPY $sgpr1 +    %2:_(s32), %3:_(s1) = G_UADDO %0, %1 +    %4:_(s32) = G_ZEXT %3 +    %5:_(s32) = G_AND %2, %4 +... 
+ +--- +name: uaddo_s32_vv +legalized: true + +body: | +  bb.0: +    liveins: $vgpr0, $vgpr1 +    ; CHECK-LABEL: name: uaddo_s32_vv +    ; CHECK: liveins: $vgpr0, $vgpr1 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 +    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:vgpr(s32), [[UADDO1:%[0-9]+]]:vcc(s1) = G_UADDO [[COPY]], [[COPY1]] +    ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1 +    ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 +    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[UADDO1]](s1), [[C]], [[C1]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UADDO]], [[SELECT]] +    %0:_(s32) = COPY $vgpr0 +    %1:_(s32) = COPY $vgpr1 +    %2:_(s32), %3:_(s1) = G_UADDO %0, %1 +    %4:_(s32) = G_ZEXT %3 +    %5:_(s32) = G_AND %2, %4 +... + +--- +name: uadde_s32_ss +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0, $sgpr1, $sgpr2 +    ; CHECK-LABEL: name: uadde_s32_ss +    ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 +    ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1 +    ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[COPY2]], [[C]] +    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:sgpr(s32), [[UADDE1:%[0-9]+]]:sgpr(s32) = G_UADDE [[COPY]], [[COPY1]], [[AND]] +    ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s32) = G_AND [[UADDE1]], [[C]] +    ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 +    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[AND1]](s32), [[C]], [[C1]] +    ; CHECK-NEXT: [[AND2:%[0-9]+]]:sgpr(s32) = G_AND [[UADDE]], [[SELECT]] +    %0:_(s32) = COPY $sgpr0 +    %1:_(s32) = COPY $sgpr1 +    %2:_(s32) = COPY $sgpr2 +    %3:_(s1) = G_TRUNC %2 +    %4:_(s32), %5:_(s1) = G_UADDE %0, %1, %3 +    %6:_(s32) = G_ZEXT %5 +    %7:_(s32) = G_AND %4, %6 +... + +--- +name: uadde_s32_sv +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0, $vgpr1, $sgpr2 +    ; CHECK-LABEL: name: uadde_s32_sv +    ; CHECK: liveins: $sgpr0, $vgpr1, $sgpr2 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 +    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32) +    ; CHECK-NEXT: [[AMDGPU_COPY_VCC_SCC:%[0-9]+]]:vcc(s1) = G_AMDGPU_COPY_VCC_SCC [[COPY2]](s32) +    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:vgpr(s32), [[UADDE1:%[0-9]+]]:vcc(s1) = G_UADDE [[COPY3]], [[COPY1]], [[AMDGPU_COPY_VCC_SCC]] +    ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1 +    ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 +    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[UADDE1]](s1), [[C]], [[C1]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UADDE]], [[SELECT]] +    %0:_(s32) = COPY $sgpr0 +    %1:_(s32) = COPY $vgpr1 +    %2:_(s32) = COPY $sgpr2 +    %3:_(s1) = G_TRUNC %2 +    %4:_(s32), %5:_(s1) = G_UADDE %0, %1, %3 +    %6:_(s32) = G_ZEXT %5 +    %7:_(s32) = G_AND %4, %6 +... 
+ +--- +name: uadde_s32_vs +legalized: true + +body: | +  bb.0: +    liveins: $vgpr0, $sgpr1, $sgpr2 +    ; CHECK-LABEL: name: uadde_s32_vs +    ; CHECK: liveins: $vgpr0, $sgpr1, $sgpr2 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 +    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32) +    ; CHECK-NEXT: [[AMDGPU_COPY_VCC_SCC:%[0-9]+]]:vcc(s1) = G_AMDGPU_COPY_VCC_SCC [[COPY2]](s32) +    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:vgpr(s32), [[UADDE1:%[0-9]+]]:vcc(s1) = G_UADDE [[COPY]], [[COPY3]], [[AMDGPU_COPY_VCC_SCC]] +    ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1 +    ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 +    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[UADDE1]](s1), [[C]], [[C1]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UADDE]], [[SELECT]] +    %0:_(s32) = COPY $vgpr0 +    %1:_(s32) = COPY $sgpr1 +    %2:_(s32) = COPY $sgpr2 +    %3:_(s1) = G_TRUNC %2 +    %4:_(s32), %5:_(s1) = G_UADDE %0, %1, %3 +    %6:_(s32) = G_ZEXT %5 +    %7:_(s32) = G_AND %4, %6 +... + +--- +name: uadde_s32_vv +legalized: true + +body: | +  bb.0: +    liveins: $vgpr0, $vgpr1, $vgpr2 +    ; CHECK-LABEL: name: uadde_s32_vv +    ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr2 +    ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1 +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[COPY2]], [[C]] +    ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 +    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[AND]](s32), [[C1]] +    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:vgpr(s32), [[UADDE1:%[0-9]+]]:vcc(s1) = G_UADDE [[COPY]], [[COPY1]], [[ICMP]] +    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:vgpr(s32) = G_SELECT [[UADDE1]](s1), [[C]], [[C1]] +    ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UADDE]], [[SELECT]] +    %0:_(s32) = COPY $vgpr0 +    %1:_(s32) = COPY $vgpr1 +    %2:_(s32) = COPY $vgpr2 +    %3:_(s1) = G_TRUNC %2 +    %4:_(s32), %5:_(s1) = G_UADDE %0, %1, %3 +    %6:_(s32) = G_ZEXT %5 +    %7:_(s32) = G_AND %4, %6 +... 
+ +--- +name: uadde_s32_ss_scc_use +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0, $sgpr1, $sgpr2 +    ; CHECK-LABEL: name: uadde_s32_ss_scc_use +    ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 +    ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1 +    ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[COPY2]], [[C]] +    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:sgpr(s32), [[UADDE1:%[0-9]+]]:sgpr(s32) = G_UADDE [[COPY]], [[COPY1]], [[AND]] +    ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s32) = G_AND [[UADDE1]], [[C]] +    ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 +    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[AND1]](s32), [[C]], [[C1]] +    ; CHECK-NEXT: [[AND2:%[0-9]+]]:sgpr(s32) = G_AND [[UADDE]], [[SELECT]] +    %0:_(s32) = COPY $sgpr0 +    %1:_(s32) = COPY $sgpr1 +    %2:_(s32) = COPY $sgpr2 +    %3:_(s1) = G_TRUNC %2 +    %4:_(s32), %5:_(s1) = G_UADDE %0, %1, %3 +    %6:_(s32) = G_ZEXT %5 +    %8:_(s32) = G_AND %4, %6 +... diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.s16.mir index 54ee69f..30c958f 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.s16.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.s16.mir @@ -1,6 +1,5 @@  # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -mtriple=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s -# RUN: llc -mtriple=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s +# RUN: llc -mtriple=amdgcn -mcpu=fiji -run-pass=amdgpu-regbankselect,amdgpu-regbanklegalize %s -verify-machineinstrs -o - | FileCheck %s  ---  name: add_s16_ss  legalized: true @@ -19,13 +18,13 @@ body: |      ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:sgpr(s32) = G_ANYEXT [[TRUNC1]](s16)      ; CHECK-NEXT: [[ADD:%[0-9]+]]:sgpr(s32) = G_ADD [[ANYEXT]], [[ANYEXT1]]      ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:sgpr(s16) = G_TRUNC [[ADD]](s32) -    ; CHECK-NEXT: S_ENDPGM 0, implicit [[TRUNC2]](s16) +    ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s16) = G_AND [[TRUNC2]], [[TRUNC2]]      %0:_(s32) = COPY $sgpr0      %1:_(s32) = COPY $sgpr1      %2:_(s16) = G_TRUNC %0      %3:_(s16) = G_TRUNC %1      %4:_(s16) = G_ADD %2, %3 -    S_ENDPGM 0, implicit %4 +    %5:_(s16) = G_AND %4, %4  ...  --- @@ -44,13 +43,13 @@ body: |      ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32)      ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC]](s16)      ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s16) = G_ADD [[COPY2]], [[TRUNC1]] -    ; CHECK-NEXT: S_ENDPGM 0, implicit [[ADD]](s16) +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[ADD]], [[ADD]]      %0:_(s32) = COPY $sgpr0      %1:_(s32) = COPY $vgpr0      %2:_(s16) = G_TRUNC %0      %3:_(s16) = G_TRUNC %1      %4:_(s16) = G_ADD %2, %3 -    S_ENDPGM 0, implicit %4 +    %5:_(s16) = G_AND %4, %4  ...  
--- @@ -69,13 +68,13 @@ body: |      ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY1]](s32)      ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC1]](s16)      ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s16) = G_ADD [[TRUNC]], [[COPY2]] -    ; CHECK-NEXT: S_ENDPGM 0, implicit [[ADD]](s16) +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[ADD]], [[ADD]]      %0:_(s32) = COPY $vgpr0      %1:_(s32) = COPY $sgpr0      %2:_(s16) = G_TRUNC %0      %3:_(s16) = G_TRUNC %1      %4:_(s16) = G_ADD %2, %3 -    S_ENDPGM 0, implicit %4 +    %5:_(s16) = G_AND %4, %4  ...  --- @@ -93,11 +92,11 @@ body: |      ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32)      ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32)      ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s16) = G_ADD [[TRUNC]], [[TRUNC1]] -    ; CHECK-NEXT: S_ENDPGM 0, implicit [[ADD]](s16) +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[ADD]], [[ADD]]      %0:_(s32) = COPY $vgpr0      %1:_(s32) = COPY $vgpr1      %2:_(s16) = G_TRUNC %0      %3:_(s16) = G_TRUNC %1      %4:_(s16) = G_ADD %2, %3 -    S_ENDPGM 0, implicit %4 +    %5:_(s16) = G_AND %4, %4  ... diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.v2s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.v2s16.mir index 97018fa..01eb391 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.v2s16.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-add.v2s16.mir @@ -1,6 +1,5 @@  # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s -# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s +# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=amdgpu-regbankselect,amdgpu-regbanklegalize %s -verify-machineinstrs -o - | FileCheck %s  ---  name: add_v2s16_ss @@ -18,16 +17,19 @@ body: |      ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16      ; CHECK-NEXT: [[LSHR:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST]], [[C]](s32)      ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:sgpr(s32) = G_BITCAST [[COPY1]](<2 x s16>) -    ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16 -    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST1]], [[C1]](s32) +    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST1]], [[C]](s32)      ; CHECK-NEXT: [[ADD:%[0-9]+]]:sgpr(s32) = G_ADD [[BITCAST]], [[BITCAST1]]      ; CHECK-NEXT: [[ADD1:%[0-9]+]]:sgpr(s32) = G_ADD [[LSHR]], [[LSHR1]]      ; CHECK-NEXT: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:sgpr(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ADD]](s32), [[ADD1]](s32) -    ; CHECK-NEXT: S_ENDPGM 0, implicit [[BUILD_VECTOR_TRUNC]](<2 x s16>) +    ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s16) = G_CONSTANT i16 255 +    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:sgpr(<2 x s16>) = G_BUILD_VECTOR [[C1]](s16), [[C1]](s16) +    ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(<2 x s16>) = G_AND [[BUILD_VECTOR_TRUNC]], [[BUILD_VECTOR]]      %0:_(<2 x s16>) = COPY $sgpr0      %1:_(<2 x s16>) = COPY $sgpr1      %2:_(<2 x s16>) = G_ADD %0, %1 -    S_ENDPGM 0, implicit %2 +    %3:_(s16) = G_CONSTANT i16 255 +    %4:_(<2 x s16>) = G_BUILD_VECTOR %3, %3 +    %5:_(<2 x s16>) = G_AND %2, %4  ...  
--- @@ -44,11 +46,11 @@ body: |      ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0      ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(<2 x s16>) = COPY [[COPY]](<2 x s16>)      ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(<2 x s16>) = G_ADD [[COPY2]], [[COPY1]] -    ; CHECK-NEXT: S_ENDPGM 0, implicit [[ADD]](<2 x s16>) +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(<2 x s16>) = G_AND [[ADD]], [[ADD]]      %0:_(<2 x s16>) = COPY $sgpr0      %1:_(<2 x s16>) = COPY $vgpr0      %2:_(<2 x s16>) = G_ADD %0, %1 -    S_ENDPGM 0, implicit %2 +    %3:_(<2 x s16>) = G_AND %2, %2  ...  --- @@ -65,9 +67,11 @@ body: |      ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0      ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(<2 x s16>) = COPY [[COPY1]](<2 x s16>)      ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(<2 x s16>) = G_ADD [[COPY]], [[COPY2]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(<2 x s16>) = G_AND [[ADD]], [[ADD]]      %0:_(<2 x s16>) = COPY $vgpr0      %1:_(<2 x s16>) = COPY $sgpr0      %2:_(<2 x s16>) = G_ADD %0, %1 +    %3:_(<2 x s16>) = G_AND %2, %2  ...  --- @@ -83,9 +87,9 @@ body: |      ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0      ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr1      ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(<2 x s16>) = G_ADD [[COPY]], [[COPY1]] -    ; CHECK-NEXT: S_ENDPGM 0, implicit [[ADD]](<2 x s16>) +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(<2 x s16>) = G_AND [[ADD]], [[ADD]]      %0:_(<2 x s16>) = COPY $vgpr0      %1:_(<2 x s16>) = COPY $vgpr1      %2:_(<2 x s16>) = G_ADD %0, %1 -    S_ENDPGM 0, implicit %2 +    %3:_(<2 x s16>) = G_AND %2, %2  ... diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sext.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sext.mir index 7378c93..e0e783e 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sext.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sext.mir @@ -77,10 +77,14 @@ body: |      ; CHECK-NEXT: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0      ; CHECK-NEXT: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[AND]](s32), [[C1]], [[C2]]      ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[SELECT]](s32) +    ; CHECK-NEXT: [[C3:%[0-9]+]]:sgpr(s16) = G_CONSTANT i16 255 +    ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s16) = G_AND [[TRUNC]], [[C3]]      %0:_(s32) = COPY $sgpr0      %1:_(s32) = COPY $sgpr1      %2:_(s1) = G_ICMP intpred(eq), %0, %1      %3:_(s16) = G_SEXT %2 +    %4:_(s16) = G_CONSTANT i16 255 +    %5:_(s16) = G_AND %3, %4  ...  --- @@ -215,9 +219,13 @@ body: |      ; CHECK-NEXT: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0      ; CHECK-NEXT: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[AND]](s32), [[C1]], [[C2]]      ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[SELECT]](s32) +    ; CHECK-NEXT: [[C3:%[0-9]+]]:sgpr(s16) = G_CONSTANT i16 255 +    ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s16) = G_AND [[TRUNC]], [[C3]]      %0:_(s32) = COPY $sgpr0      %1:_(s1) = G_TRUNC %0      %2:_(s16) = G_SEXT %1 +    %3:_(s16) = G_CONSTANT i16 255 +    %4:_(s16) = G_AND %2, %3  ...  
---
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sub.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sub.mir
index b0199d3..e3c01c0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sub.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sub.mir
@@ -1,5 +1,106 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn -mcpu=fiji -run-pass="amdgpu-regbankselect,amdgpu-regbanklegalize" %s -o - | FileCheck %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass="amdgpu-regbankselect,amdgpu-regbanklegalize" %s -o - | FileCheck %s
+
+---
+name: sub_s16_ss
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+    ; CHECK-LABEL: name: sub_s16_ss
+    ; CHECK: liveins: $sgpr0, $sgpr1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:sgpr(s32) = G_ANYEXT [[TRUNC]](s16)
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:sgpr(s32) = G_ANYEXT [[TRUNC1]](s16)
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:sgpr(s32) = G_SUB [[ANYEXT]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:sgpr(s16) = G_TRUNC [[SUB]](s32)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s16) = G_AND [[TRUNC2]], [[TRUNC2]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    %4:_(s16) = G_SUB %2, %3
+    %6:_(s16) = G_AND %4, %4
+...
+
+---
+name: sub_s16_sv
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: sub_s16_sv
+    ; CHECK: liveins: $sgpr0, $vgpr0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC]](s16)
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s16) = G_SUB [[COPY2]], [[TRUNC1]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[SUB]], [[SUB]]
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $vgpr0
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    %4:_(s16) = G_SUB %2, %3
+    %6:_(s16) = G_AND %4, %4
+...
+
+---
+name: sub_s16_vs
+legalized: true
+
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0
+    ; CHECK-LABEL: name: sub_s16_vs
+    ; CHECK: liveins: $sgpr0, $vgpr0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:sgpr(s16) = G_TRUNC [[COPY1]](s32)
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s16) = COPY [[TRUNC1]](s16)
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s16) = G_SUB [[TRUNC]], [[COPY2]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[SUB]], [[SUB]]
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $sgpr0
+    %2:_(s16) = G_TRUNC %0
+    %3:_(s16) = G_TRUNC %1
+    %4:_(s16) = G_SUB %2, %3
+    %6:_(s16) = G_AND %4, %4
+...
+ +--- +name: sub_s16_vv +legalized: true + +body: | +  bb.0: +    liveins: $vgpr0, $vgpr1 +    ; CHECK-LABEL: name: sub_s16_vv +    ; CHECK: liveins: $vgpr0, $vgpr1 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 +    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY]](s32) +    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s16) = G_TRUNC [[COPY1]](s32) +    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s16) = G_SUB [[TRUNC]], [[TRUNC1]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s16) = G_AND [[SUB]], [[SUB]] +    %0:_(s32) = COPY $vgpr0 +    %1:_(s32) = COPY $vgpr1 +    %2:_(s16) = G_TRUNC %0 +    %3:_(s16) = G_TRUNC %1 +    %4:_(s16) = G_SUB %2, %3 +    %6:_(s16) = G_AND %4, %4 +...  ---  name: sub_s32_ss @@ -14,9 +116,11 @@ body: |      ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0      ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1      ; CHECK-NEXT: [[SUB:%[0-9]+]]:sgpr(s32) = G_SUB [[COPY]], [[COPY1]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[SUB]], [[SUB]]      %0:_(s32) = COPY $sgpr0      %1:_(s32) = COPY $sgpr1      %2:_(s32) = G_SUB %0, %1 +    %4:_(s32) = G_AND %2, %2  ...  --- @@ -33,9 +137,11 @@ body: |      ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0      ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)      ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s32) = G_SUB [[COPY2]], [[COPY1]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[SUB]], [[SUB]]      %0:_(s32) = COPY $sgpr0      %1:_(s32) = COPY $vgpr0      %2:_(s32) = G_SUB %0, %1 +    %4:_(s32) = G_AND %2, %2  ...  --- @@ -52,9 +158,11 @@ body: |      ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0      ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)      ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s32) = G_SUB [[COPY]], [[COPY2]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[SUB]], [[SUB]]      %0:_(s32) = COPY $vgpr0      %1:_(s32) = COPY $sgpr0      %2:_(s32) = G_SUB %0, %1 +    %4:_(s32) = G_AND %2, %2  ...  --- @@ -70,7 +178,376 @@ body: |      ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0      ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1      ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s32) = G_SUB [[COPY]], [[COPY1]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[SUB]], [[SUB]]      %0:_(s32) = COPY $vgpr0      %1:_(s32) = COPY $vgpr1      %2:_(s32) = G_SUB %0, %1 +    %4:_(s32) = G_AND %2, %2 +... 
+ +--- +name: sub_v2s16_ss +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0, $sgpr1 +    ; CHECK-LABEL: name: sub_v2s16_ss +    ; CHECK: liveins: $sgpr0, $sgpr1 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr1 +    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:sgpr(s32) = G_BITCAST [[COPY]](<2 x s16>) +    ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16 +    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST]], [[C]](s32) +    ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:sgpr(s32) = G_BITCAST [[COPY1]](<2 x s16>) +    ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:sgpr(s32) = G_LSHR [[BITCAST1]], [[C]](s32) +    ; CHECK-NEXT: [[SUB:%[0-9]+]]:sgpr(s32) = G_SUB [[BITCAST]], [[BITCAST1]] +    ; CHECK-NEXT: [[SUB1:%[0-9]+]]:sgpr(s32) = G_SUB [[LSHR]], [[LSHR1]] +    ; CHECK-NEXT: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:sgpr(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[SUB]](s32), [[SUB1]](s32) +    ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(<2 x s16>) = G_AND [[BUILD_VECTOR_TRUNC]], [[BUILD_VECTOR_TRUNC]] +    %0:_(<2 x s16>) = COPY $sgpr0 +    %1:_(<2 x s16>) = COPY $sgpr1 +    %2:_(<2 x s16>) = G_SUB %0, %1 +    %5:_(<2 x s16>) = G_AND %2, %2 +... + +--- +name: sub_v2s16_sv +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0, $vgpr0 +    ; CHECK-LABEL: name: sub_v2s16_sv +    ; CHECK: liveins: $sgpr0, $vgpr0 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0 +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(<2 x s16>) = COPY [[COPY]](<2 x s16>) +    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(<2 x s16>) = G_SUB [[COPY2]], [[COPY1]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(<2 x s16>) = G_AND [[SUB]], [[SUB]] +    %0:_(<2 x s16>) = COPY $sgpr0 +    %1:_(<2 x s16>) = COPY $vgpr0 +    %2:_(<2 x s16>) = G_SUB %0, %1 +    %5:_(<2 x s16>) = G_AND %2, %2 +... + +--- +name: sub_v2s16_vs +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0, $vgpr0 +    ; CHECK-LABEL: name: sub_v2s16_vs +    ; CHECK: liveins: $sgpr0, $vgpr0 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(<2 x s16>) = COPY $sgpr0 +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(<2 x s16>) = COPY [[COPY1]](<2 x s16>) +    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(<2 x s16>) = G_SUB [[COPY]], [[COPY2]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(<2 x s16>) = G_AND [[SUB]], [[SUB]] +    %0:_(<2 x s16>) = COPY $vgpr0 +    %1:_(<2 x s16>) = COPY $sgpr0 +    %2:_(<2 x s16>) = G_SUB %0, %1 +    %5:_(<2 x s16>) = G_AND %2, %2 +... + +--- +name: sub_v2s16_vv +legalized: true + +body: | +  bb.0: +    liveins: $vgpr0, $vgpr1 +    ; CHECK-LABEL: name: sub_v2s16_vv +    ; CHECK: liveins: $vgpr0, $vgpr1 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr1 +    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(<2 x s16>) = G_SUB [[COPY]], [[COPY1]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(<2 x s16>) = G_AND [[SUB]], [[SUB]] +    %0:_(<2 x s16>) = COPY $vgpr0 +    %1:_(<2 x s16>) = COPY $vgpr1 +    %2:_(<2 x s16>) = G_SUB %0, %1 +    %5:_(<2 x s16>) = G_AND %2, %2 +... 
+ +--- +name: sub_s64_ss +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0_sgpr1, $sgpr0_sgpr1 +    ; CHECK-LABEL: name: sub_s64_ss +    ; CHECK: liveins: $sgpr0_sgpr1, $sgpr0_sgpr1 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1 +    ; CHECK-NEXT: [[SUB:%[0-9]+]]:sgpr(s64) = G_SUB [[COPY]], [[COPY1]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s64) = G_AND [[SUB]], [[SUB]] +    %0:_(s64) = COPY $sgpr0_sgpr1 +    %1:_(s64) = COPY $sgpr0_sgpr1 +    %2:_(s64) = G_SUB %0, %1 +    %4:_(s64) = G_AND %2, %2 +... + +--- +name: sub_s64_sv +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0_sgpr1, $vgpr0_vgpr1 +    ; CHECK-LABEL: name: sub_s64_sv +    ; CHECK: liveins: $sgpr0_sgpr1, $vgpr0_vgpr1 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s64) = COPY [[COPY]](s64) +    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s64) = G_SUB [[COPY2]], [[COPY1]] +    ; CHECK-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SUB]](s64) +    ; CHECK-NEXT: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SUB]](s64) +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[UV2]] +    ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[UV3]] +    ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32) +    %0:_(s64) = COPY $sgpr0_sgpr1 +    %1:_(s64) = COPY $vgpr0_vgpr1 +    %2:_(s64) = G_SUB %0, %1 +    %4:_(s64) = G_AND %2, %2 +... + +--- +name: sub_s64_vs +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0_sgpr1, $vgpr0_vgpr1 +    ; CHECK-LABEL: name: sub_s64_vs +    ; CHECK: liveins: $sgpr0_sgpr1, $vgpr0_vgpr1 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1 +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s64) = COPY [[COPY1]](s64) +    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s64) = G_SUB [[COPY]], [[COPY2]] +    ; CHECK-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SUB]](s64) +    ; CHECK-NEXT: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SUB]](s64) +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[UV2]] +    ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[UV3]] +    ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32) +    %0:_(s64) = COPY $vgpr0_vgpr1 +    %1:_(s64) = COPY $sgpr0_sgpr1 +    %2:_(s64) = G_SUB %0, %1 +    %4:_(s64) = G_AND %2, %2 +... 
+ +--- +name: sub_s64_vv +legalized: true + +body: | +  bb.0: +    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3 +    ; CHECK-LABEL: name: sub_s64_vv +    ; CHECK: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr2_vgpr3 +    ; CHECK-NEXT: [[SUB:%[0-9]+]]:vgpr(s64) = G_SUB [[COPY]], [[COPY1]] +    ; CHECK-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SUB]](s64) +    ; CHECK-NEXT: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SUB]](s64) +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[UV2]] +    ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[UV3]] +    ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32) +    %0:_(s64) = COPY $vgpr0_vgpr1 +    %1:_(s64) = COPY $vgpr2_vgpr3 +    %2:_(s64) = G_SUB %0, %1 +    %4:_(s64) = G_AND %2, %2 +... + +--- +name: usubo_s32_ss +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0, $sgpr1 +    ; CHECK-LABEL: name: usubo_s32_ss +    ; CHECK: liveins: $sgpr0, $sgpr1 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 +    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:sgpr(s32), [[USUBO1:%[0-9]+]]:sgpr(s32) = G_USUBO [[COPY]], [[COPY1]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[USUBO]], [[USUBO]] +    %0:_(s32) = COPY $sgpr0 +    %1:_(s32) = COPY $sgpr1 +    %2:_(s32), %3:_(s1) = G_USUBO %0, %1 +    %5:_(s32) = G_AND %2, %2 +... + +--- +name: usubo_s32_sv +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0, $vgpr1 +    ; CHECK-LABEL: name: usubo_s32_sv +    ; CHECK: liveins: $sgpr0, $vgpr1 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32) +    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:vgpr(s32), [[USUBO1:%[0-9]+]]:vcc(s1) = G_USUBO [[COPY2]], [[COPY1]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[USUBO]], [[USUBO]] +    %0:_(s32) = COPY $sgpr0 +    %1:_(s32) = COPY $vgpr1 +    %2:_(s32), %3:_(s1) = G_USUBO %0, %1 +    %5:_(s32) = G_AND %2, %2 +... + +--- +name: usubo_s32_vs +legalized: true + +body: | +  bb.0: +    liveins: $vgpr0, $sgpr1 +    ; CHECK-LABEL: name: usubo_s32_vs +    ; CHECK: liveins: $vgpr0, $sgpr1 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32) +    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:vgpr(s32), [[USUBO1:%[0-9]+]]:vcc(s1) = G_USUBO [[COPY]], [[COPY2]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[USUBO]], [[USUBO]] +    %0:_(s32) = COPY $vgpr0 +    %1:_(s32) = COPY $sgpr1 +    %2:_(s32), %3:_(s1) = G_USUBO %0, %1 +    %5:_(s32) = G_AND %2, %2 +... 
+ +--- +name: usubo_s32_vv +legalized: true + +body: | +  bb.0: +    liveins: $vgpr0, $vgpr1 +    ; CHECK-LABEL: name: usubo_s32_vv +    ; CHECK: liveins: $vgpr0, $vgpr1 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 +    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:vgpr(s32), [[USUBO1:%[0-9]+]]:vcc(s1) = G_USUBO [[COPY]], [[COPY1]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[USUBO]], [[USUBO]] +    %0:_(s32) = COPY $vgpr0 +    %1:_(s32) = COPY $vgpr1 +    %2:_(s32), %3:_(s1) = G_USUBO %0, %1 +    %5:_(s32) = G_AND %2, %2 +... + +--- +name: usube_s32_ss +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0, $sgpr1, $sgpr2 +    ; CHECK-LABEL: name: usube_s32_ss +    ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 +    ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1 +    ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[COPY2]], [[C]] +    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:sgpr(s32), [[USUBE1:%[0-9]+]]:sgpr(s32) = G_USUBE [[COPY]], [[COPY1]], [[AND]] +    ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s32) = G_AND [[USUBE]], [[USUBE]] +    %0:_(s32) = COPY $sgpr0 +    %1:_(s32) = COPY $sgpr1 +    %2:_(s32) = COPY $sgpr2 +    %3:_(s1) = G_TRUNC %2 +    %4:_(s32), %5:_(s1) = G_USUBE %0, %1, %3 +    %7:_(s32) = G_AND %4, %4 +... + +--- +name: usube_s32_sv +legalized: true + +body: | +  bb.0: +    liveins: $sgpr0, $vgpr1, $sgpr2 +    ; CHECK-LABEL: name: usube_s32_sv +    ; CHECK: liveins: $sgpr0, $vgpr1, $sgpr2 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 +    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32) +    ; CHECK-NEXT: [[AMDGPU_COPY_VCC_SCC:%[0-9]+]]:vcc(s1) = G_AMDGPU_COPY_VCC_SCC [[COPY2]](s32) +    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[COPY3]], [[COPY1]], [[AMDGPU_COPY_VCC_SCC]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[USUBE]], [[USUBE]] +    %0:_(s32) = COPY $sgpr0 +    %1:_(s32) = COPY $vgpr1 +    %2:_(s32) = COPY $sgpr2 +    %3:_(s1) = G_TRUNC %2 +    %4:_(s32), %5:_(s1) = G_USUBE %0, %1, %3 +    %7:_(s32) = G_AND %4, %4 +... + +--- +name: usube_s32_vs +legalized: true + +body: | +  bb.0: +    liveins: $vgpr0, $sgpr1, $sgpr2 +    ; CHECK-LABEL: name: usube_s32_vs +    ; CHECK: liveins: $vgpr0, $sgpr1, $sgpr2 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 +    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32) +    ; CHECK-NEXT: [[AMDGPU_COPY_VCC_SCC:%[0-9]+]]:vcc(s1) = G_AMDGPU_COPY_VCC_SCC [[COPY2]](s32) +    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[COPY]], [[COPY3]], [[AMDGPU_COPY_VCC_SCC]] +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[USUBE]], [[USUBE]] +    %0:_(s32) = COPY $vgpr0 +    %1:_(s32) = COPY $sgpr1 +    %2:_(s32) = COPY $sgpr2 +    %3:_(s1) = G_TRUNC %2 +    %4:_(s32), %5:_(s1) = G_USUBE %0, %1, %3 +    %7:_(s32) = G_AND %4, %4 +... 
+ +--- +name: usube_s32_vv +legalized: true + +body: | +  bb.0: +    liveins: $vgpr0, $vgpr1, $vgpr2 +    ; CHECK-LABEL: name: usube_s32_vv +    ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2 +    ; CHECK-NEXT: {{  $}} +    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 +    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 +    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr2 +    ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1 +    ; CHECK-NEXT: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[COPY2]], [[C]] +    ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 +    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[AND]](s32), [[C1]] +    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[COPY]], [[COPY1]], [[ICMP]] +    ; CHECK-NEXT: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[USUBE]], [[USUBE]] +    %0:_(s32) = COPY $vgpr0 +    %1:_(s32) = COPY $vgpr1 +    %2:_(s32) = COPY $vgpr2 +    %3:_(s1) = G_TRUNC %2 +    %4:_(s32), %5:_(s1) = G_USUBE %0, %1, %3 +    %7:_(s32) = G_AND %4, %4  ... diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-zext.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-zext.mir index 088c20a3..d4baa5f 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-zext.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-zext.mir @@ -73,10 +73,14 @@ body: |      ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0      ; CHECK-NEXT: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[AND]](s32), [[C]], [[C1]]      ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[SELECT]](s32) +    ; CHECK-NEXT: [[C2:%[0-9]+]]:sgpr(s16) = G_CONSTANT i16 255 +    ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s16) = G_AND [[TRUNC]], [[C2]]      %0:_(s32) = COPY $sgpr0      %1:_(s32) = COPY $sgpr1      %2:_(s1) = G_ICMP intpred(eq), %0, %1      %3:_(s16) = G_ZEXT %2 +    %4:_(s16) = G_CONSTANT i16 255 +    %5:_(s16) = G_AND %3, %4  ...  --- @@ -209,9 +213,13 @@ body: |      ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0      ; CHECK-NEXT: [[SELECT:%[0-9]+]]:sgpr(s32) = G_SELECT [[AND]](s32), [[C]], [[C1]]      ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:sgpr(s16) = G_TRUNC [[SELECT]](s32) +    ; CHECK-NEXT: [[C2:%[0-9]+]]:sgpr(s16) = G_CONSTANT i16 255 +    ; CHECK-NEXT: [[AND1:%[0-9]+]]:sgpr(s16) = G_AND [[TRUNC]], [[C2]]      %0:_(s32) = COPY $sgpr0      %1:_(s1) = G_TRUNC %0      %2:_(s16) = G_ZEXT %1 +    %3:_(s16) = G_CONSTANT i16 255 +    %4:_(s16) = G_AND %2, %3  ...  
--- diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sub.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sub.ll new file mode 100644 index 0000000..8b5958d --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sub.ll @@ -0,0 +1,535 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=hawaii < %s | FileCheck -check-prefix=GFX7 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=fiji < %s | FileCheck -check-prefix=GFX8 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX11 %s +; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX12 %s + +define i16 @s_sub_i16(i16 inreg %a, i16 inreg %b) { +; GFX7-LABEL: s_sub_i16: +; GFX7:       ; %bb.0: +; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT:    s_sub_i32 s4, s16, s17 +; GFX7-NEXT:    v_mov_b32_e32 v0, s4 +; GFX7-NEXT:    s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_sub_i16: +; GFX9:       ; %bb.0: +; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT:    s_sub_i32 s4, s16, s17 +; GFX9-NEXT:    v_mov_b32_e32 v0, s4 +; GFX9-NEXT:    s_setpc_b64 s[30:31] +; +; GFX8-LABEL: s_sub_i16: +; GFX8:       ; %bb.0: +; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT:    s_sub_i32 s4, s16, s17 +; GFX8-NEXT:    v_mov_b32_e32 v0, s4 +; GFX8-NEXT:    s_setpc_b64 s[30:31] +; +; GFX10-LABEL: s_sub_i16: +; GFX10:       ; %bb.0: +; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT:    s_sub_i32 s4, s16, s17 +; GFX10-NEXT:    v_mov_b32_e32 v0, s4 +; GFX10-NEXT:    s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_sub_i16: +; GFX11:       ; %bb.0: +; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT:    s_sub_i32 s0, s0, s1 +; GFX11-NEXT:    v_mov_b32_e32 v0, s0 +; GFX11-NEXT:    s_setpc_b64 s[30:31] +; +; GFX12-LABEL: s_sub_i16: +; GFX12:       ; %bb.0: +; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT:    s_wait_expcnt 0x0 +; GFX12-NEXT:    s_wait_samplecnt 0x0 +; GFX12-NEXT:    s_wait_bvhcnt 0x0 +; GFX12-NEXT:    s_wait_kmcnt 0x0 +; GFX12-NEXT:    s_sub_co_i32 s0, s0, s1 +; GFX12-NEXT:    s_wait_alu 0xfffe +; GFX12-NEXT:    v_mov_b32_e32 v0, s0 +; GFX12-NEXT:    s_setpc_b64 s[30:31] +  %c = sub i16 %a, %b +  ret i16 %c +} + +define i16 @v_sub_i16(i16 %a, i16 %b) { +; GFX7-LABEL: v_sub_i16: +; GFX7:       ; %bb.0: +; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1 +; GFX7-NEXT:    s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_sub_i16: +; GFX9:       ; %bb.0: +; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT:    v_sub_u16_e32 v0, v0, v1 +; GFX9-NEXT:    s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_sub_i16: +; GFX8:       ; %bb.0: +; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT:    v_sub_u16_e32 v0, v0, v1 +; GFX8-NEXT:    s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sub_i16: +; GFX10:       ; %bb.0: +; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT:    v_sub_nc_u16 v0, v0, v1 +; 
GFX10-NEXT:    s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_sub_i16: +; GFX11:       ; %bb.0: +; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT:    v_sub_nc_u16 v0.l, v0.l, v1.l +; GFX11-NEXT:    s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_sub_i16: +; GFX12:       ; %bb.0: +; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT:    s_wait_expcnt 0x0 +; GFX12-NEXT:    s_wait_samplecnt 0x0 +; GFX12-NEXT:    s_wait_bvhcnt 0x0 +; GFX12-NEXT:    s_wait_kmcnt 0x0 +; GFX12-NEXT:    v_sub_nc_u16 v0, v0, v1 +; GFX12-NEXT:    s_setpc_b64 s[30:31] +  %c = sub i16 %a, %b +  ret i16 %c +} + +define i32 @s_sub_i32(i32 inreg %a, i32 inreg %b) { +; GFX7-LABEL: s_sub_i32: +; GFX7:       ; %bb.0: +; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT:    s_sub_i32 s4, s16, s17 +; GFX7-NEXT:    v_mov_b32_e32 v0, s4 +; GFX7-NEXT:    s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_sub_i32: +; GFX9:       ; %bb.0: +; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT:    s_sub_i32 s4, s16, s17 +; GFX9-NEXT:    v_mov_b32_e32 v0, s4 +; GFX9-NEXT:    s_setpc_b64 s[30:31] +; +; GFX8-LABEL: s_sub_i32: +; GFX8:       ; %bb.0: +; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT:    s_sub_i32 s4, s16, s17 +; GFX8-NEXT:    v_mov_b32_e32 v0, s4 +; GFX8-NEXT:    s_setpc_b64 s[30:31] +; +; GFX10-LABEL: s_sub_i32: +; GFX10:       ; %bb.0: +; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT:    s_sub_i32 s4, s16, s17 +; GFX10-NEXT:    v_mov_b32_e32 v0, s4 +; GFX10-NEXT:    s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_sub_i32: +; GFX11:       ; %bb.0: +; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT:    s_sub_i32 s0, s0, s1 +; GFX11-NEXT:    v_mov_b32_e32 v0, s0 +; GFX11-NEXT:    s_setpc_b64 s[30:31] +; +; GFX12-LABEL: s_sub_i32: +; GFX12:       ; %bb.0: +; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT:    s_wait_expcnt 0x0 +; GFX12-NEXT:    s_wait_samplecnt 0x0 +; GFX12-NEXT:    s_wait_bvhcnt 0x0 +; GFX12-NEXT:    s_wait_kmcnt 0x0 +; GFX12-NEXT:    s_sub_co_i32 s0, s0, s1 +; GFX12-NEXT:    s_wait_alu 0xfffe +; GFX12-NEXT:    v_mov_b32_e32 v0, s0 +; GFX12-NEXT:    s_setpc_b64 s[30:31] +  %c = sub i32 %a, %b +  ret i32 %c +} + +define i32 @v_sub_i32(i32 %a, i32 %b) { +; GFX7-LABEL: v_sub_i32: +; GFX7:       ; %bb.0: +; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1 +; GFX7-NEXT:    s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_sub_i32: +; GFX9:       ; %bb.0: +; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT:    v_sub_u32_e32 v0, v0, v1 +; GFX9-NEXT:    s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_sub_i32: +; GFX8:       ; %bb.0: +; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v0, v1 +; GFX8-NEXT:    s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sub_i32: +; GFX10:       ; %bb.0: +; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT:    v_sub_nc_u32_e32 v0, v0, v1 +; GFX10-NEXT:    s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_sub_i32: +; GFX11:       ; %bb.0: +; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT:    v_sub_nc_u32_e32 v0, v0, v1 +; GFX11-NEXT:    s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_sub_i32: +; GFX12:       ; %bb.0: +; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT:    s_wait_expcnt 0x0 +; GFX12-NEXT:    s_wait_samplecnt 0x0 +; GFX12-NEXT:    s_wait_bvhcnt 0x0 +; GFX12-NEXT:    s_wait_kmcnt 0x0 +; GFX12-NEXT:    v_sub_nc_u32_e32 v0, v0, v1 +; GFX12-NEXT:  
  s_setpc_b64 s[30:31] +  %c = sub i32 %a, %b +  ret i32 %c +} + +; TODO: Add test for s_sub_v2i16. Instruction selector currently fails +; to handle G_UNMERGE_VALUES. + +define <2 x i16> @v_sub_v2i16(<2 x i16> %a, <2 x i16> %b) { +; GFX7-LABEL: v_sub_v2i16: +; GFX7:       ; %bb.0: +; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2 +; GFX7-NEXT:    v_sub_i32_e32 v1, vcc, v1, v3 +; GFX7-NEXT:    s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_sub_v2i16: +; GFX9:       ; %bb.0: +; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT:    v_pk_sub_i16 v0, v0, v1 +; GFX9-NEXT:    s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_sub_v2i16: +; GFX8:       ; %bb.0: +; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT:    v_sub_u16_e32 v2, v0, v1 +; GFX8-NEXT:    v_sub_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; GFX8-NEXT:    v_or_b32_e32 v0, v2, v0 +; GFX8-NEXT:    s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sub_v2i16: +; GFX10:       ; %bb.0: +; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT:    v_pk_sub_i16 v0, v0, v1 +; GFX10-NEXT:    s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_sub_v2i16: +; GFX11:       ; %bb.0: +; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT:    v_pk_sub_i16 v0, v0, v1 +; GFX11-NEXT:    s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_sub_v2i16: +; GFX12:       ; %bb.0: +; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT:    s_wait_expcnt 0x0 +; GFX12-NEXT:    s_wait_samplecnt 0x0 +; GFX12-NEXT:    s_wait_bvhcnt 0x0 +; GFX12-NEXT:    s_wait_kmcnt 0x0 +; GFX12-NEXT:    v_pk_sub_i16 v0, v0, v1 +; GFX12-NEXT:    s_setpc_b64 s[30:31] +  %c = sub <2 x i16> %a, %b +  ret <2 x i16> %c +} + +define i64 @s_sub_i64(i64 inreg %a, i64 inreg %b) { +; GFX7-LABEL: s_sub_i64: +; GFX7:       ; %bb.0: +; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT:    s_sub_u32 s4, s16, s18 +; GFX7-NEXT:    s_subb_u32 s5, s17, s19 +; GFX7-NEXT:    v_mov_b32_e32 v0, s4 +; GFX7-NEXT:    v_mov_b32_e32 v1, s5 +; GFX7-NEXT:    s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_sub_i64: +; GFX9:       ; %bb.0: +; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT:    s_sub_u32 s4, s16, s18 +; GFX9-NEXT:    s_subb_u32 s5, s17, s19 +; GFX9-NEXT:    v_mov_b32_e32 v0, s4 +; GFX9-NEXT:    v_mov_b32_e32 v1, s5 +; GFX9-NEXT:    s_setpc_b64 s[30:31] +; +; GFX8-LABEL: s_sub_i64: +; GFX8:       ; %bb.0: +; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT:    s_sub_u32 s4, s16, s18 +; GFX8-NEXT:    s_subb_u32 s5, s17, s19 +; GFX8-NEXT:    v_mov_b32_e32 v0, s4 +; GFX8-NEXT:    v_mov_b32_e32 v1, s5 +; GFX8-NEXT:    s_setpc_b64 s[30:31] +; +; GFX10-LABEL: s_sub_i64: +; GFX10:       ; %bb.0: +; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT:    s_sub_u32 s4, s16, s18 +; GFX10-NEXT:    s_subb_u32 s5, s17, s19 +; GFX10-NEXT:    v_mov_b32_e32 v0, s4 +; GFX10-NEXT:    v_mov_b32_e32 v1, s5 +; GFX10-NEXT:    s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_sub_i64: +; GFX11:       ; %bb.0: +; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT:    s_sub_u32 s0, s0, s2 +; GFX11-NEXT:    s_subb_u32 s1, s1, s3 +; GFX11-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GFX11-NEXT:    s_setpc_b64 s[30:31] +; +; GFX12-LABEL: s_sub_i64: +; GFX12:       ; %bb.0: +; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT:    s_wait_expcnt 0x0 +; GFX12-NEXT:    s_wait_samplecnt 0x0 +; GFX12-NEXT:    
s_wait_bvhcnt 0x0 +; GFX12-NEXT:    s_wait_kmcnt 0x0 +; GFX12-NEXT:    s_sub_nc_u64 s[0:1], s[0:1], s[2:3] +; GFX12-NEXT:    s_wait_alu 0xfffe +; GFX12-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1 +; GFX12-NEXT:    s_setpc_b64 s[30:31] +  %c = sub i64 %a, %b +  ret i64 %c +} + +define i64 @v_sub_i64(i64 %a, i64 %b) { +; GFX7-LABEL: v_sub_i64: +; GFX7:       ; %bb.0: +; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2 +; GFX7-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc +; GFX7-NEXT:    s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_sub_i64: +; GFX9:       ; %bb.0: +; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, v0, v2 +; GFX9-NEXT:    v_subb_co_u32_e32 v1, vcc, v1, v3, vcc +; GFX9-NEXT:    s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_sub_i64: +; GFX8:       ; %bb.0: +; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v0, v2 +; GFX8-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc +; GFX8-NEXT:    s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_sub_i64: +; GFX10:       ; %bb.0: +; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT:    v_sub_co_u32 v0, vcc_lo, v0, v2 +; GFX10-NEXT:    v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo +; GFX10-NEXT:    s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_sub_i64: +; GFX11:       ; %bb.0: +; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT:    v_sub_co_u32 v0, vcc_lo, v0, v2 +; GFX11-NEXT:    v_sub_co_ci_u32_e64 v1, null, v1, v3, vcc_lo +; GFX11-NEXT:    s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_sub_i64: +; GFX12:       ; %bb.0: +; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT:    s_wait_expcnt 0x0 +; GFX12-NEXT:    s_wait_samplecnt 0x0 +; GFX12-NEXT:    s_wait_bvhcnt 0x0 +; GFX12-NEXT:    s_wait_kmcnt 0x0 +; GFX12-NEXT:    v_sub_co_u32 v0, vcc_lo, v0, v2 +; GFX12-NEXT:    s_wait_alu 0xfffd +; GFX12-NEXT:    v_sub_co_ci_u32_e64 v1, null, v1, v3, vcc_lo +; GFX12-NEXT:    s_setpc_b64 s[30:31] +  %c = sub i64 %a, %b +  ret i64 %c +} + +define void @s_usubo_usube(i64 inreg %a, i64 inreg %b, ptr addrspace(1) %res, ptr addrspace(1) %carry) { +; GFX7-LABEL: s_usubo_usube: +; GFX7:       ; %bb.0: +; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT:    s_sub_u32 s4, s16, s18 +; GFX7-NEXT:    s_subb_u32 s5, s17, s19 +; GFX7-NEXT:    v_mov_b32_e32 v4, s4 +; GFX7-NEXT:    s_mov_b32 s6, 0 +; GFX7-NEXT:    s_cselect_b32 s8, 1, 0 +; GFX7-NEXT:    v_mov_b32_e32 v5, s5 +; GFX7-NEXT:    s_mov_b32 s7, 0xf000 +; GFX7-NEXT:    s_mov_b64 s[4:5], 0 +; GFX7-NEXT:    buffer_store_dwordx2 v[4:5], v[0:1], s[4:7], 0 addr64 +; GFX7-NEXT:    v_mov_b32_e32 v0, s8 +; GFX7-NEXT:    buffer_store_dword v0, v[2:3], s[4:7], 0 addr64 +; GFX7-NEXT:    s_waitcnt vmcnt(0) +; GFX7-NEXT:    s_setpc_b64 s[30:31] +; +; GFX9-LABEL: s_usubo_usube: +; GFX9:       ; %bb.0: +; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT:    s_sub_u32 s4, s16, s18 +; GFX9-NEXT:    s_subb_u32 s5, s17, s19 +; GFX9-NEXT:    v_mov_b32_e32 v4, s4 +; GFX9-NEXT:    s_cselect_b32 s6, 1, 0 +; GFX9-NEXT:    v_mov_b32_e32 v5, s5 +; GFX9-NEXT:    global_store_dwordx2 v[0:1], v[4:5], off +; GFX9-NEXT:    v_mov_b32_e32 v0, s6 +; GFX9-NEXT:    global_store_dword v[2:3], v0, off +; GFX9-NEXT:    s_waitcnt vmcnt(0) +; GFX9-NEXT:    s_setpc_b64 s[30:31] +; +; GFX8-LABEL: s_usubo_usube: +; GFX8:       ; %bb.0: +; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX8-NEXT:    s_sub_u32 s4, s16, s18 +; GFX8-NEXT:  
  s_subb_u32 s5, s17, s19 +; GFX8-NEXT:    v_mov_b32_e32 v4, s4 +; GFX8-NEXT:    s_cselect_b32 s6, 1, 0 +; GFX8-NEXT:    v_mov_b32_e32 v5, s5 +; GFX8-NEXT:    flat_store_dwordx2 v[0:1], v[4:5] +; GFX8-NEXT:    v_mov_b32_e32 v0, s6 +; GFX8-NEXT:    flat_store_dword v[2:3], v0 +; GFX8-NEXT:    s_waitcnt vmcnt(0) +; GFX8-NEXT:    s_setpc_b64 s[30:31] +; +; GFX10-LABEL: s_usubo_usube: +; GFX10:       ; %bb.0: +; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT:    s_sub_u32 s4, s16, s18 +; GFX10-NEXT:    s_subb_u32 s5, s17, s19 +; GFX10-NEXT:    s_cselect_b32 s6, 1, 0 +; GFX10-NEXT:    v_mov_b32_e32 v4, s4 +; GFX10-NEXT:    v_mov_b32_e32 v5, s5 +; GFX10-NEXT:    v_mov_b32_e32 v6, s6 +; GFX10-NEXT:    global_store_dwordx2 v[0:1], v[4:5], off +; GFX10-NEXT:    global_store_dword v[2:3], v6, off +; GFX10-NEXT:    s_setpc_b64 s[30:31] +; +; GFX11-LABEL: s_usubo_usube: +; GFX11:       ; %bb.0: +; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT:    s_sub_u32 s0, s0, s2 +; GFX11-NEXT:    s_subb_u32 s1, s1, s3 +; GFX11-NEXT:    s_cselect_b32 s2, 1, 0 +; GFX11-NEXT:    v_dual_mov_b32 v5, s1 :: v_dual_mov_b32 v4, s0 +; GFX11-NEXT:    v_mov_b32_e32 v6, s2 +; GFX11-NEXT:    global_store_b64 v[0:1], v[4:5], off +; GFX11-NEXT:    global_store_b32 v[2:3], v6, off +; GFX11-NEXT:    s_setpc_b64 s[30:31] +; +; GFX12-LABEL: s_usubo_usube: +; GFX12:       ; %bb.0: +; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT:    s_wait_expcnt 0x0 +; GFX12-NEXT:    s_wait_samplecnt 0x0 +; GFX12-NEXT:    s_wait_bvhcnt 0x0 +; GFX12-NEXT:    s_wait_kmcnt 0x0 +; GFX12-NEXT:    s_sub_co_u32 s0, s0, s2 +; GFX12-NEXT:    s_sub_co_ci_u32 s1, s1, s3 +; GFX12-NEXT:    s_cselect_b32 s2, 1, 0 +; GFX12-NEXT:    s_wait_alu 0xfffe +; GFX12-NEXT:    v_dual_mov_b32 v5, s1 :: v_dual_mov_b32 v4, s0 +; GFX12-NEXT:    v_mov_b32_e32 v6, s2 +; GFX12-NEXT:    global_store_b64 v[0:1], v[4:5], off +; GFX12-NEXT:    global_store_b32 v[2:3], v6, off +; GFX12-NEXT:    s_setpc_b64 s[30:31] +  %usubo = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b) +  %sub = extractvalue {i64, i1} %usubo, 0 +  %of = extractvalue {i64, i1} %usubo, 1 +  %of32 = select i1 %of, i32 1, i32 0 +  store i64 %sub, ptr addrspace(1) %res +  store i32 %of32, ptr addrspace(1) %carry +  ret void +} + +define void @v_usubo_usube(i64 %a, i64 %b, ptr addrspace(1) %res, ptr addrspace(1) %carry) { +; GFX7-LABEL: v_usubo_usube: +; GFX7:       ; %bb.0: +; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX7-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2 +; GFX7-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc +; GFX7-NEXT:    s_mov_b32 s6, 0 +; GFX7-NEXT:    s_mov_b32 s7, 0xf000 +; GFX7-NEXT:    s_mov_b64 s[4:5], 0 +; GFX7-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc +; GFX7-NEXT:    buffer_store_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 +; GFX7-NEXT:    buffer_store_dword v2, v[6:7], s[4:7], 0 addr64 +; GFX7-NEXT:    s_waitcnt vmcnt(0) +; GFX7-NEXT:    s_setpc_b64 s[30:31] +; +; GFX9-LABEL: v_usubo_usube: +; GFX9:       ; %bb.0: +; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, v0, v2 +; GFX9-NEXT:    v_subb_co_u32_e32 v1, vcc, v1, v3, vcc +; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc +; GFX9-NEXT:    global_store_dwordx2 v[4:5], v[0:1], off +; GFX9-NEXT:    global_store_dword v[6:7], v2, off +; GFX9-NEXT:    s_waitcnt vmcnt(0) +; GFX9-NEXT:    s_setpc_b64 s[30:31] +; +; GFX8-LABEL: v_usubo_usube: +; GFX8:       ; %bb.0: +; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; 
GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v0, v2 +; GFX8-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc +; GFX8-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc +; GFX8-NEXT:    flat_store_dwordx2 v[4:5], v[0:1] +; GFX8-NEXT:    flat_store_dword v[6:7], v2 +; GFX8-NEXT:    s_waitcnt vmcnt(0) +; GFX8-NEXT:    s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_usubo_usube: +; GFX10:       ; %bb.0: +; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT:    v_sub_co_u32 v0, vcc_lo, v0, v2 +; GFX10-NEXT:    v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo +; GFX10-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo +; GFX10-NEXT:    global_store_dwordx2 v[4:5], v[0:1], off +; GFX10-NEXT:    global_store_dword v[6:7], v2, off +; GFX10-NEXT:    s_setpc_b64 s[30:31] +; +; GFX11-LABEL: v_usubo_usube: +; GFX11:       ; %bb.0: +; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT:    v_sub_co_u32 v0, vcc_lo, v0, v2 +; GFX11-NEXT:    v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo +; GFX11-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo +; GFX11-NEXT:    global_store_b64 v[4:5], v[0:1], off +; GFX11-NEXT:    global_store_b32 v[6:7], v2, off +; GFX11-NEXT:    s_setpc_b64 s[30:31] +; +; GFX12-LABEL: v_usubo_usube: +; GFX12:       ; %bb.0: +; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0 +; GFX12-NEXT:    s_wait_expcnt 0x0 +; GFX12-NEXT:    s_wait_samplecnt 0x0 +; GFX12-NEXT:    s_wait_bvhcnt 0x0 +; GFX12-NEXT:    s_wait_kmcnt 0x0 +; GFX12-NEXT:    v_sub_co_u32 v0, vcc_lo, v0, v2 +; GFX12-NEXT:    s_wait_alu 0xfffd +; GFX12-NEXT:    v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo +; GFX12-NEXT:    s_wait_alu 0xfffd +; GFX12-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo +; GFX12-NEXT:    global_store_b64 v[4:5], v[0:1], off +; GFX12-NEXT:    global_store_b32 v[6:7], v2, off +; GFX12-NEXT:    s_setpc_b64 s[30:31] +  %usubo = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b) +  %sub = extractvalue {i64, i1} %usubo, 0 +  %of = extractvalue {i64, i1} %usubo, 1 +  %of32 = select i1 %of, i32 1, i32 0 +  store i64 %sub, ptr addrspace(1) %res +  store i32 %of32, ptr addrspace(1) %carry +  ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsic.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsic.ll new file mode 100644 index 0000000..34d4c51 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsic.ll @@ -0,0 +1,173 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100  -o - %s | FileCheck %s +define amdgpu_kernel void @readfirstlane_with_readfirstlane(ptr addrspace(1) %out) { +; CHECK-LABEL: readfirstlane_with_readfirstlane: +; CHECK:       ; %bb.0: +; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0 +; CHECK-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 5 +; CHECK-NEXT:    s_waitcnt lgkmcnt(0) +; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1] +; CHECK-NEXT:    s_endpgm +  %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 5) +  %v2 = call i32 @llvm.amdgcn.readfirstlane(i32 %v1) +  store i32 %v2, ptr addrspace(1) %out +  ret void +} + +define amdgpu_kernel void @readfirstlane_with_readlane(ptr addrspace(1) %out) { +; CHECK-LABEL: readfirstlane_with_readlane: +; CHECK:       ; %bb.0: +; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0 +; CHECK-NEXT:    v_bfe_u32 v1, v0, 10, 10 +; CHECK-NEXT:    v_and_b32_e32 v0, 0x3ff, v0 +; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; CHECK-NEXT:    v_readfirstlane_b32 
s2, v1 +; CHECK-NEXT:    v_readlane_b32 s2, v0, s2 +; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1) +; CHECK-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 +; CHECK-NEXT:    s_waitcnt lgkmcnt(0) +; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1] +; CHECK-NEXT:    s_endpgm +  %tidx = call i32 @llvm.amdgcn.workitem.id.x() +  %tidy = call i32 @llvm.amdgcn.workitem.id.y() +  %v1 = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy) +  %v2 = call i32 @llvm.amdgcn.readfirstlane(i32 %v1) +  store i32 %v2, ptr addrspace(1) %out +  ret void +} + +define amdgpu_kernel void @readlane_with_firstlane(ptr addrspace(1) %out) { +; CHECK-LABEL: readlane_with_firstlane: +; CHECK:       ; %bb.0: +; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0 +; CHECK-NEXT:    v_and_b32_e32 v0, 0x3ff, v0 +; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; CHECK-NEXT:    v_readfirstlane_b32 s2, v0 +; CHECK-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 +; CHECK-NEXT:    s_waitcnt lgkmcnt(0) +; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1] +; CHECK-NEXT:    s_endpgm +  %tidx = call i32 @llvm.amdgcn.workitem.id.x() +  %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 %tidx) +  %v2 = call i32 @llvm.amdgcn.readlane(i32 %v1, i32 3) +  store i32 %v2, ptr addrspace(1) %out +  ret void +} + +define amdgpu_kernel void @readlane_readlane(ptr addrspace(1) %out) { +; CHECK-LABEL: readlane_readlane: +; CHECK:       ; %bb.0: +; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0 +; CHECK-NEXT:    v_bfe_u32 v1, v0, 10, 10 +; CHECK-NEXT:    v_and_b32_e32 v0, 0x3ff, v0 +; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) +; CHECK-NEXT:    v_readfirstlane_b32 s2, v1 +; CHECK-NEXT:    v_readlane_b32 s2, v0, s2 +; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1) +; CHECK-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 +; CHECK-NEXT:    s_waitcnt lgkmcnt(0) +; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1] +; CHECK-NEXT:    s_endpgm +  %tidx = call i32 @llvm.amdgcn.workitem.id.x() +  %tidy = call i32 @llvm.amdgcn.workitem.id.y() +  %v1 = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy) +  %v2 = call i32 @llvm.amdgcn.readlane(i32 %v1, i32 2) +  store i32 %v2, ptr addrspace(1) %out +  ret void +} + +define amdgpu_kernel void @permlane64_uniform(ptr addrspace(1) %out, i32 %src) { +; CHECK-LABEL: permlane64_uniform: +; CHECK:       ; %bb.0: +; CHECK-NEXT:    s_clause 0x1 +; CHECK-NEXT:    s_load_b32 s2, s[4:5], 0x8 +; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0 +; CHECK-NEXT:    s_waitcnt lgkmcnt(0) +; CHECK-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 +; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1] +; CHECK-NEXT:    s_endpgm +  %v = call i32 @llvm.amdgcn.permlane64(i32 %src) +  store i32 %v, ptr addrspace(1) %out +  ret void +} + +define amdgpu_kernel void @permlane64_nonuniform(i32 addrspace(1)* %out) { +; CHECK-LABEL: permlane64_nonuniform: +; CHECK:       ; %bb.0: +; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0 +; CHECK-NEXT:    v_and_b32_e32 v0, 0x3ff, v0 +; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1) +; CHECK-NEXT:    v_permlane64_b32 v1, v0 +; CHECK-NEXT:    v_lshlrev_b32_e32 v0, 2, v0 +; CHECK-NEXT:    s_waitcnt lgkmcnt(0) +; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1] +; CHECK-NEXT:    s_endpgm +  %tid = call i32 @llvm.amdgcn.workitem.id.x() +  %v = call i32 @llvm.amdgcn.permlane64(i32 %tid) +  %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid +  store i32 %v, i32 addrspace(1)* %out_ptr +  ret void +} + +define 
amdgpu_kernel void @permlane64_nonuniform_expression(i32 addrspace(1)* %out) {
+; CHECK-LABEL: permlane64_nonuniform_expression:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0
+; CHECK-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; CHECK-NEXT:    v_add_nc_u32_e32 v1, 1, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; CHECK-NEXT:    v_permlane64_b32 v1, v1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT:    s_endpgm
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid2 = add i32 %tid, 1
+  %v = call i32 @llvm.amdgcn.permlane64(i32 %tid2)
+  %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+  store i32 %v, i32 addrspace(1)* %out_ptr
+  ret void
+}
+
+define protected amdgpu_kernel void @trivial_waterfall_eq_zero(ptr addrspace(1) %out) {
+; CHECK-LABEL: trivial_waterfall_eq_zero:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0
+; CHECK-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 5
+; CHECK-NEXT:    s_mov_b32 s2, 0
+; CHECK-NEXT:    s_branch .LBB7_2
+; CHECK-NEXT:  .LBB7_1: ; %Flow
+; CHECK-NEXT:    ; in Loop: Header=BB7_2 Depth=1
+; CHECK-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s2
+; CHECK-NEXT:    s_mov_b32 s2, -1
+; CHECK-NEXT:    s_cbranch_vccz .LBB7_4
+; CHECK-NEXT:  .LBB7_2: ; %while
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_and_b32 vcc_lo, exec_lo, s2
+; CHECK-NEXT:    s_mov_b32 s2, -1
+; CHECK-NEXT:    s_cbranch_vccnz .LBB7_1
+; CHECK-NEXT:  ; %bb.3: ; %if
+; CHECK-NEXT:    ; in Loop: Header=BB7_2 Depth=1
+; CHECK-NEXT:    s_mov_b32 s2, 0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT:    s_branch .LBB7_1
+; CHECK-NEXT:  .LBB7_4: ; %exit
+; CHECK-NEXT:    s_endpgm
+entry:
+  br label %while
+
+while:
+  %done = phi i1 [ 0, %entry ], [ 1, %if ]
+  %not_done = xor i1 %done, true
+  %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %not_done)
+  %is_done = icmp eq i64 %ballot, 0 ; in this case is_done = !not_done
+  br i1 %is_done, label %exit, label %if
+
+if:
+  store i32 5, ptr addrspace(1) %out
+  br label %while
+
+exit:
+  ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-wwm.ll b/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-wwm.ll
index db32135..b8f084d 100644
--- a/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-wwm.ll
+++ b/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-wwm.ll
@@ -4,24 +4,14 @@
 define amdgpu_gs i32 @main() {
 ; CHECK-LABEL: main:
 ; CHECK:       ; %bb.0: ; %bb
-; CHECK-NEXT:    s_bitcmp1_b32 0, 0
 ; CHECK-NEXT:    s_mov_b32 s0, 0
-; CHECK-NEXT:    s_cselect_b32 s1, -1, 0
-; CHECK-NEXT:    s_or_saveexec_b32 s2, -1
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s1
-; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; CHECK-NEXT:    v_readfirstlane_b32 s1, v0
-; CHECK-NEXT:    s_mov_b32 exec_lo, s2
-; CHECK-NEXT:    s_or_b32 s0, s0, s1
-; CHECK-NEXT:    s_wait_alu 0xfffe
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
 ; CHECK-NEXT:    s_bitcmp1_b32 s0, 0
 ; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
-; CHECK-NEXT:    s_wait_alu 0xfffe
 ; CHECK-NEXT:    s_xor_b32 s0, s0, -1
-; CHECK-NEXT:    s_wait_alu 0xfffe
-; CHECK-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s0
-; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; CHECK-NEXT:    v_readfirstlane_b32 s0, v1
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
 ; CHECK-NEXT:    s_wait_alu 0xf1ff
 ; CHECK-NEXT:    ; return to shader part epilog
 bb:
diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll
index 3aa3663..704ea37 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll
@@ -9,11 +9,11 @@
 ; RUN:   | FileCheck -check-prefix=GCN-O3 %s
 
-; GCN-O0: 
require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O0>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(atomic-expand,verify,gc-lowering,lower-constant-intrinsics,unreachableblockelim,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa,require<uniformity>,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,localstackalloc))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,phi-node-elimination,two-address-instruction,regallocfast,si-fix-vgpr-copies,remove-redundant-debug-values,fixup-statepoint-caller-saved,prolog-epilog,post-ra-pseudos,si-post-ra-bundler,fentry-insert,xray-instrumentation,patchable-function,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function)) +; GCN-O0: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O0>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-uniform-intrinsic-combine),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(atomic-expand,verify,gc-lowering,lower-constant-intrinsics,unreachableblockelim,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa,require<uniformity>,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,localstackalloc))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,phi-node-elimination,two-address-instruction,regallocfast,si-fix-vgpr-copies,remove-redundant-debug-values,fixup-statepoint-caller-saved,prolog-epilog,post-ra-pseudos,si-post-ra-bundler,fentry-insert,xray-instrumentation,patchable-function,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function)) -; GCN-O2: 
require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O2>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,early-cse<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,early-cse<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments,codegenprepare,load-store-vectorizer),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function)) +; GCN-O2: 
require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O2>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt,amdgpu-uniform-intrinsic-combine),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,early-cse<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,early-cse<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments,codegenprepare,load-store-vectorizer),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function)) -; GCN-O3: 
require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O3>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,gvn<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,gvn<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments,codegenprepare,load-store-vectorizer),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function)) +; GCN-O3: 
require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp<O3>),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt,amdgpu-uniform-intrinsic-combine),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,gvn<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,gvn<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments,codegenprepare,load-store-vectorizer),amdgpu-lower-buffer-fat-pointers,amdgpu-lower-intrinsics,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-lower-vgpr-encoding,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))  define void @empty() {    ret void diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll 
b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll index 6e52125..ee6caab 100644 --- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll +++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll @@ -31,6 +31,11 @@  ; GCN-O0-NEXT:    AMDGPU Remove Incompatible Functions  ; GCN-O0-NEXT:    AMDGPU Printf lowering  ; GCN-O0-NEXT:    Lower ctors and dtors for AMDGPU +; GCN-O0-NEXT:    FunctionPass Manager +; GCN-O0-NEXT:      Dominator Tree Construction +; GCN-O0-NEXT:      Cycle Info Analysis +; GCN-O0-NEXT:      Uniformity Analysis +; GCN-O0-NEXT:      AMDGPU Uniform Intrinsic Combine  ; GCN-O0-NEXT:    Expand variadic functions  ; GCN-O0-NEXT:    AMDGPU Inline All Functions  ; GCN-O0-NEXT:    Inliner for always_inline functions @@ -179,6 +184,11 @@  ; GCN-O1-NEXT:    AMDGPU Remove Incompatible Functions  ; GCN-O1-NEXT:    AMDGPU Printf lowering  ; GCN-O1-NEXT:    Lower ctors and dtors for AMDGPU +; GCN-O1-NEXT:    FunctionPass Manager +; GCN-O1-NEXT:      Dominator Tree Construction +; GCN-O1-NEXT:      Cycle Info Analysis +; GCN-O1-NEXT:      Uniformity Analysis +; GCN-O1-NEXT:      AMDGPU Uniform Intrinsic Combine  ; GCN-O1-NEXT:    Expand variadic functions  ; GCN-O1-NEXT:    AMDGPU Inline All Functions  ; GCN-O1-NEXT:    Inliner for always_inline functions @@ -466,6 +476,11 @@  ; GCN-O1-OPTS-NEXT:    AMDGPU Remove Incompatible Functions  ; GCN-O1-OPTS-NEXT:    AMDGPU Printf lowering  ; GCN-O1-OPTS-NEXT:    Lower ctors and dtors for AMDGPU +; GCN-O1-OPTS-NEXT:    FunctionPass Manager +; GCN-O1-OPTS-NEXT:      Dominator Tree Construction +; GCN-O1-OPTS-NEXT:      Cycle Info Analysis +; GCN-O1-OPTS-NEXT:      Uniformity Analysis +; GCN-O1-OPTS-NEXT:      AMDGPU Uniform Intrinsic Combine  ; GCN-O1-OPTS-NEXT:    Expand variadic functions  ; GCN-O1-OPTS-NEXT:    AMDGPU Inline All Functions  ; GCN-O1-OPTS-NEXT:    Inliner for always_inline functions @@ -783,6 +798,10 @@  ; GCN-O2-NEXT:    Lower ctors and dtors for AMDGPU  ; GCN-O2-NEXT:    FunctionPass Manager  ; GCN-O2-NEXT:      AMDGPU Image Intrinsic Optimizer +; GCN-O2-NEXT:      Dominator Tree Construction +; GCN-O2-NEXT:      Cycle Info Analysis +; GCN-O2-NEXT:      Uniformity Analysis +; GCN-O2-NEXT:      AMDGPU Uniform Intrinsic Combine  ; GCN-O2-NEXT:    Expand variadic functions  ; GCN-O2-NEXT:    AMDGPU Inline All Functions  ; GCN-O2-NEXT:    Inliner for always_inline functions @@ -1104,6 +1123,10 @@  ; GCN-O3-NEXT:    Lower ctors and dtors for AMDGPU  ; GCN-O3-NEXT:    FunctionPass Manager  ; GCN-O3-NEXT:      AMDGPU Image Intrinsic Optimizer +; GCN-O3-NEXT:      Dominator Tree Construction +; GCN-O3-NEXT:      Cycle Info Analysis +; GCN-O3-NEXT:      Uniformity Analysis +; GCN-O3-NEXT:      AMDGPU Uniform Intrinsic Combine  ; GCN-O3-NEXT:    Expand variadic functions  ; GCN-O3-NEXT:    AMDGPU Inline All Functions  ; GCN-O3-NEXT:    Inliner for always_inline functions diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll index e00e1f1..c1f3a12 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i32.ll @@ -110,9 +110,8 @@ false:  define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_non_compare(i32 inreg %v) {  ; CHECK-LABEL: branch_uniform_ballot_ne_zero_non_compare:  ; CHECK:       ; %bb.0: -; CHECK-NEXT:    s_and_b32 s0, s0, 1 -; CHECK-NEXT:    v_cmp_ne_u32_e64 vcc_lo, s0, 0 -; CHECK-NEXT:    s_cbranch_vccz .LBB8_2 +; CHECK-NEXT:    s_bitcmp0_b32 s0, 0 +; CHECK-NEXT:    s_cbranch_scc1 .LBB8_2  ; CHECK-NEXT:  ; %bb.1: ; %true  ; 
CHECK-NEXT:    s_mov_b32 s0, 42  ; CHECK-NEXT:    s_branch .LBB8_3 @@ -156,15 +155,16 @@ false:  define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_non_compare(i32 inreg %v) {  ; CHECK-LABEL: branch_uniform_ballot_eq_zero_non_compare:  ; CHECK:       ; %bb.0: -; CHECK-NEXT:    s_and_b32 s0, s0, 1 -; CHECK-NEXT:    v_cmp_ne_u32_e64 vcc_lo, s0, 0 -; CHECK-NEXT:    s_cbranch_vccz .LBB10_2 -; CHECK-NEXT:  ; %bb.1: ; %false -; CHECK-NEXT:    s_mov_b32 s0, 33 -; CHECK-NEXT:    s_branch .LBB10_3 -; CHECK-NEXT:  .LBB10_2: ; %true +; CHECK-NEXT:    s_bitcmp1_b32 s0, 0 +; CHECK-NEXT:    s_cselect_b32 s0, -1, 0 +; CHECK-NEXT:    s_and_b32 vcc_lo, exec_lo, s0 +; CHECK-NEXT:    s_cbranch_vccnz .LBB10_2 +; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42  ; CHECK-NEXT:    s_branch .LBB10_3 +; CHECK-NEXT:  .LBB10_2: ; %false +; CHECK-NEXT:    s_mov_b32 s0, 33 +; CHECK-NEXT:    s_branch .LBB10_3  ; CHECK-NEXT:  .LBB10_3:    %c = trunc i32 %v to i1    %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c) @@ -201,8 +201,8 @@ false:  define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_compare(i32 inreg %v) {  ; CHECK-LABEL: branch_uniform_ballot_ne_zero_compare:  ; CHECK:       ; %bb.0: -; CHECK-NEXT:    v_cmp_lt_u32_e64 vcc_lo, s0, 12 -; CHECK-NEXT:    s_cbranch_vccz .LBB12_2 +; CHECK-NEXT:    s_cmp_gt_u32 s0, 11 +; CHECK-NEXT:    s_cbranch_scc1 .LBB12_2  ; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42  ; CHECK-NEXT:    s_branch .LBB12_3 @@ -245,14 +245,14 @@ false:  define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_compare(i32 inreg %v) {  ; CHECK-LABEL: branch_uniform_ballot_eq_zero_compare:  ; CHECK:       ; %bb.0: -; CHECK-NEXT:    v_cmp_lt_u32_e64 vcc_lo, s0, 12 -; CHECK-NEXT:    s_cbranch_vccz .LBB14_2 -; CHECK-NEXT:  ; %bb.1: ; %false -; CHECK-NEXT:    s_mov_b32 s0, 33 -; CHECK-NEXT:    s_branch .LBB14_3 -; CHECK-NEXT:  .LBB14_2: ; %true +; CHECK-NEXT:    s_cmp_lt_u32 s0, 12 +; CHECK-NEXT:    s_cbranch_scc1 .LBB14_2 +; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42  ; CHECK-NEXT:    s_branch .LBB14_3 +; CHECK-NEXT:  .LBB14_2: ; %false +; CHECK-NEXT:    s_mov_b32 s0, 33 +; CHECK-NEXT:    s_branch .LBB14_3  ; CHECK-NEXT:  .LBB14_3:    %c = icmp ult i32 %v, 12    %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c) @@ -293,13 +293,13 @@ false:  define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_and(i32 inreg %v1, i32 inreg %v2) {  ; CHECK-LABEL: branch_uniform_ballot_ne_zero_and:  ; CHECK:       ; %bb.0: -; CHECK-NEXT:    s_cmp_lt_u32 s0, 12 +; CHECK-NEXT:    s_cmp_gt_u32 s0, 11  ; CHECK-NEXT:    s_cselect_b32 s0, -1, 0 -; CHECK-NEXT:    s_cmp_gt_u32 s1, 34 +; CHECK-NEXT:    s_cmp_lt_u32 s1, 35  ; CHECK-NEXT:    s_cselect_b32 s1, -1, 0 -; CHECK-NEXT:    s_and_b32 s0, s0, s1 -; CHECK-NEXT:    s_and_b32 s0, s0, exec_lo -; CHECK-NEXT:    s_cbranch_scc0 .LBB16_2 +; CHECK-NEXT:    s_or_b32 s0, s0, s1 +; CHECK-NEXT:    s_and_b32 vcc_lo, exec_lo, s0 +; CHECK-NEXT:    s_cbranch_vccnz .LBB16_2  ; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42  ; CHECK-NEXT:    s_branch .LBB16_3 @@ -353,14 +353,14 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_and(i32 inreg %v1, i32 inreg  ; CHECK-NEXT:    s_cmp_gt_u32 s1, 34  ; CHECK-NEXT:    s_cselect_b32 s1, -1, 0  ; CHECK-NEXT:    s_and_b32 s0, s0, s1 -; CHECK-NEXT:    s_and_b32 s0, s0, exec_lo -; CHECK-NEXT:    s_cbranch_scc0 .LBB18_2 -; CHECK-NEXT:  ; %bb.1: ; %false -; CHECK-NEXT:    s_mov_b32 s0, 33 -; CHECK-NEXT:    s_branch .LBB18_3 -; CHECK-NEXT:  .LBB18_2: ; %true +; CHECK-NEXT:    s_and_b32 
vcc_lo, exec_lo, s0 +; CHECK-NEXT:    s_cbranch_vccnz .LBB18_2 +; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42  ; CHECK-NEXT:    s_branch .LBB18_3 +; CHECK-NEXT:  .LBB18_2: ; %false +; CHECK-NEXT:    s_mov_b32 s0, 33 +; CHECK-NEXT:    s_branch .LBB18_3  ; CHECK-NEXT:  .LBB18_3:    %v1c = icmp ult i32 %v1, 12    %v2c = icmp ugt i32 %v2, 34 @@ -591,3 +591,24 @@ exit:    store i32 %ballot, ptr addrspace(1) %out    ret void  } + +define amdgpu_cs i32 @compare_bfloats(bfloat %x, bfloat %y) { +; GFX10-LABEL: compare_bfloats: +; GFX10:       ; %bb.0: +; GFX10-NEXT:    v_lshlrev_b32_e32 v1, 16, v1 +; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0 +; GFX10-NEXT:    v_cmp_gt_f32_e64 s0, v0, v1 +; GFX10-NEXT:    ; return to shader part epilog +; +; GFX11-LABEL: compare_bfloats: +; GFX11:       ; %bb.0: +; GFX11-NEXT:    v_mov_b16_e32 v2.l, 0 +; GFX11-NEXT:    v_mov_b16_e32 v2.h, v1.l +; GFX11-NEXT:    v_mov_b16_e32 v1.h, v0.l +; GFX11-NEXT:    v_mov_b16_e32 v1.l, v2.l +; GFX11-NEXT:    v_cmp_gt_f32_e64 s0, v1, v2 +; GFX11-NEXT:    ; return to shader part epilog +  %cmp = fcmp ogt bfloat %x, %y +  %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %cmp) +  ret i32 %ballot +} diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.ll index b4adf7f..827a01f 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ballot.i64.ll @@ -113,9 +113,8 @@ false:  define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_non_compare(i32 inreg %v) {  ; CHECK-LABEL: branch_uniform_ballot_ne_zero_non_compare:  ; CHECK:       ; %bb.0: -; CHECK-NEXT:    s_and_b32 s0, s0, 1 -; CHECK-NEXT:    v_cmp_ne_u32_e64 vcc, s0, 0 -; CHECK-NEXT:    s_cbranch_vccz .LBB8_2 +; CHECK-NEXT:    s_bitcmp0_b32 s0, 0 +; CHECK-NEXT:    s_cbranch_scc1 .LBB8_2  ; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42  ; CHECK-NEXT:    s_branch .LBB8_3 @@ -159,15 +158,16 @@ false:  define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_non_compare(i32 inreg %v) {  ; CHECK-LABEL: branch_uniform_ballot_eq_zero_non_compare:  ; CHECK:       ; %bb.0: -; CHECK-NEXT:    s_and_b32 s0, s0, 1 -; CHECK-NEXT:    v_cmp_ne_u32_e64 vcc, s0, 0 -; CHECK-NEXT:    s_cbranch_vccz .LBB10_2 -; CHECK-NEXT:  ; %bb.1: ; %false -; CHECK-NEXT:    s_mov_b32 s0, 33 -; CHECK-NEXT:    s_branch .LBB10_3 -; CHECK-NEXT:  .LBB10_2: ; %true +; CHECK-NEXT:    s_bitcmp1_b32 s0, 0 +; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0 +; CHECK-NEXT:    s_and_b64 vcc, exec, s[0:1] +; CHECK-NEXT:    s_cbranch_vccnz .LBB10_2 +; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42  ; CHECK-NEXT:    s_branch .LBB10_3 +; CHECK-NEXT:  .LBB10_2: ; %false +; CHECK-NEXT:    s_mov_b32 s0, 33 +; CHECK-NEXT:    s_branch .LBB10_3  ; CHECK-NEXT:  .LBB10_3:    %c = trunc i32 %v to i1    %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c) @@ -204,8 +204,8 @@ false:  define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_compare(i32 inreg %v) {  ; CHECK-LABEL: branch_uniform_ballot_ne_zero_compare:  ; CHECK:       ; %bb.0: -; CHECK-NEXT:    v_cmp_lt_u32_e64 vcc, s0, 12 -; CHECK-NEXT:    s_cbranch_vccz .LBB12_2 +; CHECK-NEXT:    s_cmp_gt_u32 s0, 11 +; CHECK-NEXT:    s_cbranch_scc1 .LBB12_2  ; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42  ; CHECK-NEXT:    s_branch .LBB12_3 @@ -248,14 +248,14 @@ false:  define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_compare(i32 inreg %v) {  ; CHECK-LABEL: branch_uniform_ballot_eq_zero_compare:  ; CHECK:       ; %bb.0: -; 
CHECK-NEXT:    v_cmp_lt_u32_e64 vcc, s0, 12 -; CHECK-NEXT:    s_cbranch_vccz .LBB14_2 -; CHECK-NEXT:  ; %bb.1: ; %false -; CHECK-NEXT:    s_mov_b32 s0, 33 -; CHECK-NEXT:    s_branch .LBB14_3 -; CHECK-NEXT:  .LBB14_2: ; %true +; CHECK-NEXT:    s_cmp_lt_u32 s0, 12 +; CHECK-NEXT:    s_cbranch_scc1 .LBB14_2 +; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42  ; CHECK-NEXT:    s_branch .LBB14_3 +; CHECK-NEXT:  .LBB14_2: ; %false +; CHECK-NEXT:    s_mov_b32 s0, 33 +; CHECK-NEXT:    s_branch .LBB14_3  ; CHECK-NEXT:  .LBB14_3:    %c = icmp ult i32 %v, 12    %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c) @@ -296,13 +296,13 @@ false:  define amdgpu_cs i32 @branch_uniform_ballot_ne_zero_and(i32 inreg %v1, i32 inreg %v2) {  ; CHECK-LABEL: branch_uniform_ballot_ne_zero_and:  ; CHECK:       ; %bb.0: -; CHECK-NEXT:    s_cmp_lt_u32 s0, 12 +; CHECK-NEXT:    s_cmp_gt_u32 s0, 11  ; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0 -; CHECK-NEXT:    s_cmp_gt_u32 s1, 34 +; CHECK-NEXT:    s_cmp_lt_u32 s1, 35  ; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0 -; CHECK-NEXT:    s_and_b64 s[0:1], s[2:3], s[0:1] -; CHECK-NEXT:    s_and_b64 s[0:1], s[0:1], exec -; CHECK-NEXT:    s_cbranch_scc0 .LBB16_2 +; CHECK-NEXT:    s_or_b64 s[0:1], s[2:3], s[0:1] +; CHECK-NEXT:    s_and_b64 vcc, exec, s[0:1] +; CHECK-NEXT:    s_cbranch_vccnz .LBB16_2  ; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42  ; CHECK-NEXT:    s_branch .LBB16_3 @@ -356,14 +356,14 @@ define amdgpu_cs i32 @branch_uniform_ballot_eq_zero_and(i32 inreg %v1, i32 inreg  ; CHECK-NEXT:    s_cmp_gt_u32 s1, 34  ; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0  ; CHECK-NEXT:    s_and_b64 s[0:1], s[2:3], s[0:1] -; CHECK-NEXT:    s_and_b64 s[0:1], s[0:1], exec -; CHECK-NEXT:    s_cbranch_scc0 .LBB18_2 -; CHECK-NEXT:  ; %bb.1: ; %false -; CHECK-NEXT:    s_mov_b32 s0, 33 -; CHECK-NEXT:    s_branch .LBB18_3 -; CHECK-NEXT:  .LBB18_2: ; %true +; CHECK-NEXT:    s_and_b64 vcc, exec, s[0:1] +; CHECK-NEXT:    s_cbranch_vccnz .LBB18_2 +; CHECK-NEXT:  ; %bb.1: ; %true  ; CHECK-NEXT:    s_mov_b32 s0, 42  ; CHECK-NEXT:    s_branch .LBB18_3 +; CHECK-NEXT:  .LBB18_2: ; %false +; CHECK-NEXT:    s_mov_b32 s0, 33 +; CHECK-NEXT:    s_branch .LBB18_3  ; CHECK-NEXT:  .LBB18_3:    %v1c = icmp ult i32 %v1, 12    %v2c = icmp ugt i32 %v2, 34 @@ -557,3 +557,15 @@ exit:    store i64 %ballot, ptr addrspace(1) %out    ret void  } + +define amdgpu_cs i64 @compare_bfloats(bfloat %x, bfloat %y) { +; CHECK-LABEL: compare_bfloats: +; CHECK:       ; %bb.0: +; CHECK-NEXT:    v_lshlrev_b32_e32 v1, 16, v1 +; CHECK-NEXT:    v_lshlrev_b32_e32 v0, 16, v0 +; CHECK-NEXT:    v_cmp_gt_f32_e64 s[0:1], v0, v1 +; CHECK-NEXT:    ; return to shader part epilog +  %cmp = fcmp ogt bfloat %x, %y +  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %cmp) +  ret i64 %ballot +} diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ll index 6dd2258..39191d2 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ll @@ -23,10 +23,8 @@ define amdgpu_kernel void @test_s_i32(ptr addrspace(1) %out, i32 %src0) {  ; GFX11-SDAG-NEXT:    s_load_b32 s2, s[4:5], 0x2c  ; GFX11-SDAG-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24  ; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0) -; GFX11-SDAG-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2 -; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_1) -; GFX11-SDAG-NEXT:    v_permlane64_b32 v0, v0 -; GFX11-SDAG-NEXT:    global_store_b32 v1, v0, s[0:1] +; 
GFX11-SDAG-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 +; GFX11-SDAG-NEXT:    global_store_b32 v0, v1, s[0:1]  ; GFX11-SDAG-NEXT:    s_endpgm  ;  ; GFX11-GISEL-LABEL: test_s_i32: @@ -36,8 +34,6 @@ define amdgpu_kernel void @test_s_i32(ptr addrspace(1) %out, i32 %src0) {  ; GFX11-GISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24  ; GFX11-GISEL-NEXT:    s_waitcnt lgkmcnt(0)  ; GFX11-GISEL-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2 -; GFX11-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) -; GFX11-GISEL-NEXT:    v_permlane64_b32 v0, v0  ; GFX11-GISEL-NEXT:    global_store_b32 v1, v0, s[0:1]  ; GFX11-GISEL-NEXT:    s_endpgm    %v = call i32 @llvm.amdgcn.permlane64.i32(i32 %src0) @@ -50,12 +46,9 @@ define amdgpu_kernel void @test_s_i64(ptr addrspace(1) %out, i64 %src0) {  ; GFX11-SDAG:       ; %bb.0:  ; GFX11-SDAG-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24  ; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0) -; GFX11-SDAG-NEXT:    v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s3 -; GFX11-SDAG-NEXT:    v_mov_b32_e32 v2, s2 -; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-SDAG-NEXT:    v_permlane64_b32 v1, v0 -; GFX11-SDAG-NEXT:    v_permlane64_b32 v0, v2 -; GFX11-SDAG-NEXT:    global_store_b64 v3, v[0:1], s[0:1] +; GFX11-SDAG-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3 +; GFX11-SDAG-NEXT:    v_mov_b32_e32 v0, s2 +; GFX11-SDAG-NEXT:    global_store_b64 v2, v[0:1], s[0:1]  ; GFX11-SDAG-NEXT:    s_endpgm  ;  ; GFX11-GISEL-LABEL: test_s_i64: @@ -64,9 +57,6 @@ define amdgpu_kernel void @test_s_i64(ptr addrspace(1) %out, i64 %src0) {  ; GFX11-GISEL-NEXT:    v_mov_b32_e32 v2, 0  ; GFX11-GISEL-NEXT:    s_waitcnt lgkmcnt(0)  ; GFX11-GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3 -; GFX11-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-GISEL-NEXT:    v_permlane64_b32 v0, v0 -; GFX11-GISEL-NEXT:    v_permlane64_b32 v1, v1  ; GFX11-GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]  ; GFX11-GISEL-NEXT:    s_endpgm    %v = call i64 @llvm.amdgcn.permlane64.i64(i64 %src0) @@ -79,12 +69,9 @@ define amdgpu_kernel void @test_s_f64(ptr addrspace(1) %out, double %src0) {  ; GFX11-SDAG:       ; %bb.0:  ; GFX11-SDAG-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24  ; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0) -; GFX11-SDAG-NEXT:    v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s3 -; GFX11-SDAG-NEXT:    v_mov_b32_e32 v2, s2 -; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-SDAG-NEXT:    v_permlane64_b32 v1, v0 -; GFX11-SDAG-NEXT:    v_permlane64_b32 v0, v2 -; GFX11-SDAG-NEXT:    global_store_b64 v3, v[0:1], s[0:1] +; GFX11-SDAG-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3 +; GFX11-SDAG-NEXT:    v_mov_b32_e32 v0, s2 +; GFX11-SDAG-NEXT:    global_store_b64 v2, v[0:1], s[0:1]  ; GFX11-SDAG-NEXT:    s_endpgm  ;  ; GFX11-GISEL-LABEL: test_s_f64: @@ -93,9 +80,6 @@ define amdgpu_kernel void @test_s_f64(ptr addrspace(1) %out, double %src0) {  ; GFX11-GISEL-NEXT:    v_mov_b32_e32 v2, 0  ; GFX11-GISEL-NEXT:    s_waitcnt lgkmcnt(0)  ; GFX11-GISEL-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3 -; GFX11-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-GISEL-NEXT:    v_permlane64_b32 v0, v0 -; GFX11-GISEL-NEXT:    v_permlane64_b32 v1, v1  ; GFX11-GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]  ; GFX11-GISEL-NEXT:    s_endpgm    %v = call double @llvm.amdgcn.permlane64.f64(double %src0) @@ 
-116,19 +100,15 @@ define amdgpu_kernel void @test_i_i32(ptr addrspace(1) %out) {  ; GFX11-SDAG-LABEL: test_i_i32:  ; GFX11-SDAG:       ; %bb.0:  ; GFX11-SDAG-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24 -; GFX11-SDAG-NEXT:    v_dual_mov_b32 v0, 0x63 :: v_dual_mov_b32 v1, 0 -; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_1) -; GFX11-SDAG-NEXT:    v_permlane64_b32 v0, v0 +; GFX11-SDAG-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x63  ; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0) -; GFX11-SDAG-NEXT:    global_store_b32 v1, v0, s[0:1] +; GFX11-SDAG-NEXT:    global_store_b32 v0, v1, s[0:1]  ; GFX11-SDAG-NEXT:    s_endpgm  ;  ; GFX11-GISEL-LABEL: test_i_i32:  ; GFX11-GISEL:       ; %bb.0:  ; GFX11-GISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24  ; GFX11-GISEL-NEXT:    v_dual_mov_b32 v0, 0x63 :: v_dual_mov_b32 v1, 0 -; GFX11-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) -; GFX11-GISEL-NEXT:    v_permlane64_b32 v0, v0  ; GFX11-GISEL-NEXT:    s_waitcnt lgkmcnt(0)  ; GFX11-GISEL-NEXT:    global_store_b32 v1, v0, s[0:1]  ; GFX11-GISEL-NEXT:    s_endpgm @@ -141,19 +121,15 @@ define amdgpu_kernel void @test_i_f32(ptr addrspace(1) %out) {  ; GFX11-SDAG-LABEL: test_i_f32:  ; GFX11-SDAG:       ; %bb.0:  ; GFX11-SDAG-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24 -; GFX11-SDAG-NEXT:    v_dual_mov_b32 v0, 0x449a5000 :: v_dual_mov_b32 v1, 0 -; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_1) -; GFX11-SDAG-NEXT:    v_permlane64_b32 v0, v0 +; GFX11-SDAG-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x449a5000  ; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0) -; GFX11-SDAG-NEXT:    global_store_b32 v1, v0, s[0:1] +; GFX11-SDAG-NEXT:    global_store_b32 v0, v1, s[0:1]  ; GFX11-SDAG-NEXT:    s_endpgm  ;  ; GFX11-GISEL-LABEL: test_i_f32:  ; GFX11-GISEL:       ; %bb.0:  ; GFX11-GISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24  ; GFX11-GISEL-NEXT:    v_dual_mov_b32 v0, 0x449a5000 :: v_dual_mov_b32 v1, 0 -; GFX11-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) -; GFX11-GISEL-NEXT:    v_permlane64_b32 v0, v0  ; GFX11-GISEL-NEXT:    s_waitcnt lgkmcnt(0)  ; GFX11-GISEL-NEXT:    global_store_b32 v1, v0, s[0:1]  ; GFX11-GISEL-NEXT:    s_endpgm @@ -166,23 +142,16 @@ define amdgpu_kernel void @test_i_i64(ptr addrspace(1) %out) {  ; GFX11-SDAG-LABEL: test_i_i64:  ; GFX11-SDAG:       ; %bb.0:  ; GFX11-SDAG-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24 -; GFX11-SDAG-NEXT:    v_mov_b32_e32 v2, 0 -; GFX11-SDAG-NEXT:    v_mov_b32_e32 v0, 0x63 -; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-SDAG-NEXT:    v_permlane64_b32 v1, v2 -; GFX11-SDAG-NEXT:    v_permlane64_b32 v0, v0 +; GFX11-SDAG-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, 0x63  ; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0) -; GFX11-SDAG-NEXT:    global_store_b64 v2, v[0:1], s[0:1] +; GFX11-SDAG-NEXT:    global_store_b64 v1, v[0:1], s[0:1]  ; GFX11-SDAG-NEXT:    s_endpgm  ;  ; GFX11-GISEL-LABEL: test_i_i64:  ; GFX11-GISEL:       ; %bb.0:  ; GFX11-GISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24  ; GFX11-GISEL-NEXT:    v_mov_b32_e32 v0, 0x63 -; GFX11-GISEL-NEXT:    v_mov_b32_e32 v2, 0 -; GFX11-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-GISEL-NEXT:    v_permlane64_b32 v0, v0 -; GFX11-GISEL-NEXT:    v_permlane64_b32 v1, v2 +; GFX11-GISEL-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, 0  ; GFX11-GISEL-NEXT:    s_waitcnt lgkmcnt(0)  ; GFX11-GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]  ; GFX11-GISEL-NEXT:    s_endpgm @@ -195,22 +164,16 @@ define amdgpu_kernel void 
@test_i_f64(ptr addrspace(1) %out) {  ; GFX11-SDAG-LABEL: test_i_f64:  ; GFX11-SDAG:       ; %bb.0:  ; GFX11-SDAG-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24 -; GFX11-SDAG-NEXT:    v_mov_b32_e32 v0, 0x40934a00 -; GFX11-SDAG-NEXT:    v_mov_b32_e32 v2, 0 -; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-SDAG-NEXT:    v_permlane64_b32 v1, v0 -; GFX11-SDAG-NEXT:    v_permlane64_b32 v0, v2 +; GFX11-SDAG-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x40934a00  ; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0) -; GFX11-SDAG-NEXT:    global_store_b64 v2, v[0:1], s[0:1] +; GFX11-SDAG-NEXT:    global_store_b64 v0, v[0:1], s[0:1]  ; GFX11-SDAG-NEXT:    s_endpgm  ;  ; GFX11-GISEL-LABEL: test_i_f64:  ; GFX11-GISEL:       ; %bb.0:  ; GFX11-GISEL-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24 -; GFX11-GISEL-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, 0x40934a00 -; GFX11-GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-GISEL-NEXT:    v_permlane64_b32 v0, v2 -; GFX11-GISEL-NEXT:    v_permlane64_b32 v1, v1 +; GFX11-GISEL-NEXT:    v_mov_b32_e32 v0, 0 +; GFX11-GISEL-NEXT:    v_dual_mov_b32 v1, 0x40934a00 :: v_dual_mov_b32 v2, 0  ; GFX11-GISEL-NEXT:    s_waitcnt lgkmcnt(0)  ; GFX11-GISEL-NEXT:    global_store_b64 v2, v[0:1], s[0:1]  ; GFX11-GISEL-NEXT:    s_endpgm diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ptr.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ptr.ll index b0149f7..672b658 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ptr.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.permlane64.ptr.ll @@ -6,12 +6,9 @@ define amdgpu_kernel void @test_p0(ptr addrspace(1) %out, ptr %src0) {  ; GFX11-SDAG:       ; %bb.0:  ; GFX11-SDAG-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24  ; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0) -; GFX11-SDAG-NEXT:    v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s3 -; GFX11-SDAG-NEXT:    v_mov_b32_e32 v2, s2 -; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-SDAG-NEXT:    v_permlane64_b32 v1, v0 -; GFX11-SDAG-NEXT:    v_permlane64_b32 v0, v2 -; GFX11-SDAG-NEXT:    global_store_b64 v3, v[0:1], s[0:1] +; GFX11-SDAG-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s3 +; GFX11-SDAG-NEXT:    v_mov_b32_e32 v0, s2 +; GFX11-SDAG-NEXT:    global_store_b64 v2, v[0:1], s[0:1]  ; GFX11-SDAG-NEXT:    s_endpgm    %v = call ptr @llvm.amdgcn.permlane64.p0(ptr %src0)    store ptr %v, ptr addrspace(1) %out @@ -22,21 +19,14 @@ define amdgpu_kernel void @test_v3p0(ptr addrspace(1) %out, <3 x ptr> %src0) {  ; GFX11-SDAG-LABEL: test_v3p0:  ; GFX11-SDAG:       ; %bb.0:  ; GFX11-SDAG-NEXT:    s_clause 0x2 -; GFX11-SDAG-NEXT:    s_load_b128 s[0:3], s[4:5], 0x44  ; GFX11-SDAG-NEXT:    s_load_b64 s[6:7], s[4:5], 0x54 +; GFX11-SDAG-NEXT:    s_load_b128 s[0:3], s[4:5], 0x44  ; GFX11-SDAG-NEXT:    s_load_b64 s[4:5], s[4:5], 0x24  ; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0) -; GFX11-SDAG-NEXT:    v_dual_mov_b32 v6, 0 :: v_dual_mov_b32 v1, s2 -; GFX11-SDAG-NEXT:    v_dual_mov_b32 v4, s1 :: v_dual_mov_b32 v5, s7 -; GFX11-SDAG-NEXT:    v_mov_b32_e32 v8, s6 -; GFX11-SDAG-NEXT:    v_dual_mov_b32 v0, s3 :: v_dual_mov_b32 v7, s0 -; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11-SDAG-NEXT:    v_permlane64_b32 v2, v1 -; GFX11-SDAG-NEXT:    v_permlane64_b32 v1, v4 -; GFX11-SDAG-NEXT:    v_permlane64_b32 v5, v5 -; GFX11-SDAG-NEXT:    v_permlane64_b32 v4, v8 -; GFX11-SDAG-NEXT:    v_permlane64_b32 
v3, v0 -; GFX11-SDAG-NEXT:    v_permlane64_b32 v0, v7 +; GFX11-SDAG-NEXT:    v_dual_mov_b32 v6, 0 :: v_dual_mov_b32 v5, s7 +; GFX11-SDAG-NEXT:    v_dual_mov_b32 v4, s6 :: v_dual_mov_b32 v1, s1 +; GFX11-SDAG-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3 +; GFX11-SDAG-NEXT:    v_mov_b32_e32 v2, s2  ; GFX11-SDAG-NEXT:    s_clause 0x1  ; GFX11-SDAG-NEXT:    global_store_b64 v6, v[4:5], s[4:5] offset:16  ; GFX11-SDAG-NEXT:    global_store_b128 v6, v[0:3], s[4:5] @@ -53,10 +43,8 @@ define amdgpu_kernel void @test_p3(ptr addrspace(1) %out, ptr addrspace(3) %src0  ; GFX11-SDAG-NEXT:    s_load_b32 s2, s[4:5], 0x2c  ; GFX11-SDAG-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24  ; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0) -; GFX11-SDAG-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2 -; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_1) -; GFX11-SDAG-NEXT:    v_permlane64_b32 v0, v0 -; GFX11-SDAG-NEXT:    global_store_b32 v1, v0, s[0:1] +; GFX11-SDAG-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 +; GFX11-SDAG-NEXT:    global_store_b32 v0, v1, s[0:1]  ; GFX11-SDAG-NEXT:    s_endpgm    %v = call ptr addrspace(3) @llvm.amdgcn.permlane64.v3p0(ptr addrspace(3) %src0)    store ptr addrspace(3) %v, ptr addrspace(1) %out @@ -70,14 +58,9 @@ define amdgpu_kernel void @test_v3p3(ptr addrspace(1) %out, <3 x ptr addrspace(3  ; GFX11-SDAG-NEXT:    s_load_b128 s[0:3], s[4:5], 0x34  ; GFX11-SDAG-NEXT:    s_load_b64 s[4:5], s[4:5], 0x24  ; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0) -; GFX11-SDAG-NEXT:    v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v3, s0 -; GFX11-SDAG-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s1 -; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-SDAG-NEXT:    v_permlane64_b32 v2, v0 -; GFX11-SDAG-NEXT:    v_permlane64_b32 v1, v1 -; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_4) -; GFX11-SDAG-NEXT:    v_permlane64_b32 v0, v3 -; GFX11-SDAG-NEXT:    global_store_b96 v4, v[0:2], s[4:5] +; GFX11-SDAG-NEXT:    v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s0 +; GFX11-SDAG-NEXT:    v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2 +; GFX11-SDAG-NEXT:    global_store_b96 v3, v[0:2], s[4:5]  ; GFX11-SDAG-NEXT:    s_endpgm    %v = call <3 x ptr addrspace(3)> @llvm.amdgcn.permlane64.v3p3(<3 x ptr addrspace(3)> %src0)    store <3 x ptr addrspace(3)> %v, ptr addrspace(1) %out @@ -91,10 +74,8 @@ define amdgpu_kernel void @test_p5(ptr addrspace(1) %out, ptr addrspace(5) %src0  ; GFX11-SDAG-NEXT:    s_load_b32 s2, s[4:5], 0x2c  ; GFX11-SDAG-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24  ; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0) -; GFX11-SDAG-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2 -; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_1) -; GFX11-SDAG-NEXT:    v_permlane64_b32 v0, v0 -; GFX11-SDAG-NEXT:    global_store_b32 v1, v0, s[0:1] +; GFX11-SDAG-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 +; GFX11-SDAG-NEXT:    global_store_b32 v0, v1, s[0:1]  ; GFX11-SDAG-NEXT:    s_endpgm    %v = call ptr addrspace(5) @llvm.amdgcn.permlane64.p5(ptr addrspace(5) %src0)    store ptr addrspace(5) %v, ptr addrspace(1) %out @@ -108,14 +89,9 @@ define amdgpu_kernel void @test_v3p5(ptr addrspace(1) %out, <3 x ptr addrspace(5  ; GFX11-SDAG-NEXT:    s_load_b128 s[0:3], s[4:5], 0x34  ; GFX11-SDAG-NEXT:    s_load_b64 s[4:5], s[4:5], 0x24  ; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0) -; GFX11-SDAG-NEXT:    v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v3, s0 -; GFX11-SDAG-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s1 -; GFX11-SDAG-NEXT:    
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-SDAG-NEXT:    v_permlane64_b32 v2, v0 -; GFX11-SDAG-NEXT:    v_permlane64_b32 v1, v1 -; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_4) -; GFX11-SDAG-NEXT:    v_permlane64_b32 v0, v3 -; GFX11-SDAG-NEXT:    global_store_b96 v4, v[0:2], s[4:5] +; GFX11-SDAG-NEXT:    v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s0 +; GFX11-SDAG-NEXT:    v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2 +; GFX11-SDAG-NEXT:    global_store_b96 v3, v[0:2], s[4:5]  ; GFX11-SDAG-NEXT:    s_endpgm    %v = call <3 x ptr addrspace(5)> @llvm.amdgcn.permlane64.v3p5(<3 x ptr addrspace(5)> %src0)    store <3 x ptr addrspace(5)> %v, ptr addrspace(1) %out @@ -129,10 +105,8 @@ define amdgpu_kernel void @test_p6(ptr addrspace(1) %out, ptr addrspace(6) %src0  ; GFX11-SDAG-NEXT:    s_load_b32 s2, s[4:5], 0x2c  ; GFX11-SDAG-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24  ; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0) -; GFX11-SDAG-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2 -; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_1) -; GFX11-SDAG-NEXT:    v_permlane64_b32 v0, v0 -; GFX11-SDAG-NEXT:    global_store_b32 v1, v0, s[0:1] +; GFX11-SDAG-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 +; GFX11-SDAG-NEXT:    global_store_b32 v0, v1, s[0:1]  ; GFX11-SDAG-NEXT:    s_endpgm    %v = call ptr addrspace(6) @llvm.amdgcn.permlane64.p6(ptr addrspace(6) %src0)    store ptr addrspace(6) %v, ptr addrspace(1) %out @@ -146,14 +120,9 @@ define amdgpu_kernel void @test_v3p6(ptr addrspace(1) %out, <3 x ptr addrspace(6  ; GFX11-SDAG-NEXT:    s_load_b128 s[0:3], s[4:5], 0x34  ; GFX11-SDAG-NEXT:    s_load_b64 s[4:5], s[4:5], 0x24  ; GFX11-SDAG-NEXT:    s_waitcnt lgkmcnt(0) -; GFX11-SDAG-NEXT:    v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v3, s0 -; GFX11-SDAG-NEXT:    v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s1 -; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-SDAG-NEXT:    v_permlane64_b32 v2, v0 -; GFX11-SDAG-NEXT:    v_permlane64_b32 v1, v1 -; GFX11-SDAG-NEXT:    s_delay_alu instid0(VALU_DEP_4) -; GFX11-SDAG-NEXT:    v_permlane64_b32 v0, v3 -; GFX11-SDAG-NEXT:    global_store_b96 v4, v[0:2], s[4:5] +; GFX11-SDAG-NEXT:    v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v0, s0 +; GFX11-SDAG-NEXT:    v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2 +; GFX11-SDAG-NEXT:    global_store_b96 v3, v[0:2], s[4:5]  ; GFX11-SDAG-NEXT:    s_endpgm    %v = call <3 x ptr addrspace(6)> @llvm.amdgcn.permlane64.v3p6(<3 x ptr addrspace(6)> %src0)    store <3 x ptr addrspace(6)> %v, ptr addrspace(1) %out diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll index d1ba892..02d2990 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll @@ -396,8 +396,7 @@ define amdgpu_kernel void @test_readfirstlane_imm_f64(ptr addrspace(1) %out) {  ;  ; CHECK-GISEL-LABEL: test_readfirstlane_imm_f64:  ; CHECK-GISEL:       ; %bb.0: -; CHECK-GISEL-NEXT:    s_mov_b32 s0, 0 -; CHECK-GISEL-NEXT:    s_mov_b32 s1, 0x40400000 +; CHECK-GISEL-NEXT:    s_mov_b64 s[0:1], 0x4040000000000000  ; CHECK-GISEL-NEXT:    ;;#ASMSTART  ; CHECK-GISEL-NEXT:    ; use s[0:1]  ; CHECK-GISEL-NEXT:    ;;#ASMEND @@ -456,14 +455,13 @@ define amdgpu_kernel void @test_readfirstlane_imm_fold_i64(ptr addrspace(1) %out  ; CHECK-GISEL-LABEL: test_readfirstlane_imm_fold_i64:  ; CHECK-GISEL:       ; %bb.0:  ; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], 
s[8:9], 0x0 -; CHECK-GISEL-NEXT:    s_mov_b64 s[2:3], 32  ; CHECK-GISEL-NEXT:    s_add_i32 s12, s12, s17 -; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s2 +; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, 32  ; CHECK-GISEL-NEXT:    s_mov_b32 flat_scratch_lo, s13 +; CHECK-GISEL-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8  ; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)  ; CHECK-GISEL-NEXT:    v_mov_b32_e32 v3, s1 -; CHECK-GISEL-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8 -; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s3 +; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, 0  ; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s0  ; CHECK-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]  ; CHECK-GISEL-NEXT:    s_endpgm @@ -490,15 +488,13 @@ define amdgpu_kernel void @test_readfirstlane_imm_fold_f64(ptr addrspace(1) %out  ; CHECK-GISEL-LABEL: test_readfirstlane_imm_fold_f64:  ; CHECK-GISEL:       ; %bb.0:  ; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0 -; CHECK-GISEL-NEXT:    s_mov_b32 s2, 0  ; CHECK-GISEL-NEXT:    s_add_i32 s12, s12, s17 -; CHECK-GISEL-NEXT:    s_mov_b32 s3, 0x40400000 -; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s2 -; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0) -; CHECK-GISEL-NEXT:    v_mov_b32_e32 v3, s1 +; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, 0  ; CHECK-GISEL-NEXT:    s_mov_b32 flat_scratch_lo, s13  ; CHECK-GISEL-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8 -; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s3 +; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0) +; CHECK-GISEL-NEXT:    v_mov_b32_e32 v3, s1 +; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, 0x40400000  ; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s0  ; CHECK-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]  ; CHECK-GISEL-NEXT:    s_endpgm @@ -588,17 +584,17 @@ define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr_i64(ptr addrspace(1  ; CHECK-SDAG:       ; %bb.0:  ; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0  ; CHECK-SDAG-NEXT:    s_add_i32 s12, s12, s17 -; CHECK-SDAG-NEXT:    s_mov_b32 flat_scratch_lo, s13 -; CHECK-SDAG-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8  ; CHECK-SDAG-NEXT:    ;;#ASMSTART  ; CHECK-SDAG-NEXT:    s_mov_b64 s[2:3], 0  ; CHECK-SDAG-NEXT:    ;;#ASMEND +; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s2 +; CHECK-SDAG-NEXT:    s_mov_b32 flat_scratch_lo, s13  ; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0) -; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s1 -; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s2 -; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s3 -; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s0 -; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1] +; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s0 +; CHECK-SDAG-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8 +; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s1 +; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s3 +; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]  ; CHECK-SDAG-NEXT:    s_endpgm  ;  ; CHECK-GISEL-LABEL: test_readfirstlane_copy_from_sgpr_i64: @@ -628,17 +624,17 @@ define amdgpu_kernel void @test_readfirstlane_copy_from_sgpr_f64(ptr addrspace(1  ; CHECK-SDAG:       ; %bb.0:  ; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0  ; CHECK-SDAG-NEXT:    s_add_i32 s12, s12, s17 -; CHECK-SDAG-NEXT:    s_mov_b32 flat_scratch_lo, s13 -; CHECK-SDAG-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8  ; CHECK-SDAG-NEXT:    ;;#ASMSTART  ; CHECK-SDAG-NEXT:    s_mov_b64 s[2:3], 0  ; CHECK-SDAG-NEXT:    ;;#ASMEND +; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s2 +; CHECK-SDAG-NEXT:    s_mov_b32 flat_scratch_lo, s13  ; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0) -; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s1 -; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s2 
-; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s3 -; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s0 -; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1] +; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s0 +; CHECK-SDAG-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8 +; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s1 +; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s3 +; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]  ; CHECK-SDAG-NEXT:    s_endpgm  ;  ; CHECK-GISEL-LABEL: test_readfirstlane_copy_from_sgpr_f64: diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll index 7ff5eb4..0795f40 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll @@ -9,7 +9,7 @@ declare double @llvm.amdgcn.readlane.f64(double, i32) #0  define amdgpu_kernel void @test_readlane_sreg_sreg_i32(i32 %src0, i32 %src1) #1 {  ; CHECK-SDAG-LABEL: test_readlane_sreg_sreg_i32:  ; CHECK-SDAG:       ; %bb.0: -; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0 +; CHECK-SDAG-NEXT:    s_load_dword s0, s[8:9], 0x0  ; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0)  ; CHECK-SDAG-NEXT:    ;;#ASMSTART  ; CHECK-SDAG-NEXT:    ; use s0 @@ -18,7 +18,7 @@ define amdgpu_kernel void @test_readlane_sreg_sreg_i32(i32 %src0, i32 %src1) #1  ;  ; CHECK-GISEL-LABEL: test_readlane_sreg_sreg_i32:  ; CHECK-GISEL:       ; %bb.0: -; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0 +; CHECK-GISEL-NEXT:    s_load_dword s0, s[8:9], 0x0  ; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)  ; CHECK-GISEL-NEXT:    ;;#ASMSTART  ; CHECK-GISEL-NEXT:    ; use s0 @@ -224,14 +224,13 @@ define amdgpu_kernel void @test_readlane_imm_sreg_i64(ptr addrspace(1) %out, i32  ; CHECK-GISEL-LABEL: test_readlane_imm_sreg_i64:  ; CHECK-GISEL:       ; %bb.0:  ; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0 -; CHECK-GISEL-NEXT:    s_mov_b64 s[2:3], 32  ; CHECK-GISEL-NEXT:    s_add_i32 s12, s12, s17 -; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s2 +; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, 32  ; CHECK-GISEL-NEXT:    s_mov_b32 flat_scratch_lo, s13 +; CHECK-GISEL-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8  ; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0)  ; CHECK-GISEL-NEXT:    v_mov_b32_e32 v3, s1 -; CHECK-GISEL-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8 -; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s3 +; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, 0  ; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s0  ; CHECK-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]  ; CHECK-GISEL-NEXT:    s_endpgm @@ -258,15 +257,13 @@ define amdgpu_kernel void @test_readlane_imm_sreg_f64(ptr addrspace(1) %out, i32  ; CHECK-GISEL-LABEL: test_readlane_imm_sreg_f64:  ; CHECK-GISEL:       ; %bb.0:  ; CHECK-GISEL-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0 -; CHECK-GISEL-NEXT:    s_mov_b32 s2, 0  ; CHECK-GISEL-NEXT:    s_add_i32 s12, s12, s17 -; CHECK-GISEL-NEXT:    s_mov_b32 s3, 0x40400000 -; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, s2 -; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0) -; CHECK-GISEL-NEXT:    v_mov_b32_e32 v3, s1 +; CHECK-GISEL-NEXT:    v_mov_b32_e32 v0, 0  ; CHECK-GISEL-NEXT:    s_mov_b32 flat_scratch_lo, s13  ; CHECK-GISEL-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8 -; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, s3 +; CHECK-GISEL-NEXT:    s_waitcnt lgkmcnt(0) +; CHECK-GISEL-NEXT:    v_mov_b32_e32 v3, s1 +; CHECK-GISEL-NEXT:    v_mov_b32_e32 v1, 0x40400000  ; CHECK-GISEL-NEXT:    v_mov_b32_e32 v2, s0  ; CHECK-GISEL-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]  ; CHECK-GISEL-NEXT:    s_endpgm @@ -660,17 +657,17 @@ define 
amdgpu_kernel void @test_readlane_copy_from_sgpr_i64(ptr addrspace(1) %ou  ; CHECK-SDAG:       ; %bb.0:  ; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0  ; CHECK-SDAG-NEXT:    s_add_i32 s12, s12, s17 -; CHECK-SDAG-NEXT:    s_mov_b32 flat_scratch_lo, s13 -; CHECK-SDAG-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8  ; CHECK-SDAG-NEXT:    ;;#ASMSTART  ; CHECK-SDAG-NEXT:    s_mov_b64 s[2:3], 0  ; CHECK-SDAG-NEXT:    ;;#ASMEND +; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s2 +; CHECK-SDAG-NEXT:    s_mov_b32 flat_scratch_lo, s13  ; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0) -; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s1 -; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s2 -; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s3 -; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s0 -; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1] +; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s0 +; CHECK-SDAG-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8 +; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s1 +; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s3 +; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]  ; CHECK-SDAG-NEXT:    s_endpgm  ;  ; CHECK-GISEL-LABEL: test_readlane_copy_from_sgpr_i64: @@ -700,17 +697,17 @@ define amdgpu_kernel void @test_readlane_copy_from_sgpr_f64(ptr addrspace(1) %ou  ; CHECK-SDAG:       ; %bb.0:  ; CHECK-SDAG-NEXT:    s_load_dwordx2 s[0:1], s[8:9], 0x0  ; CHECK-SDAG-NEXT:    s_add_i32 s12, s12, s17 -; CHECK-SDAG-NEXT:    s_mov_b32 flat_scratch_lo, s13 -; CHECK-SDAG-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8  ; CHECK-SDAG-NEXT:    ;;#ASMSTART  ; CHECK-SDAG-NEXT:    s_mov_b64 s[2:3], 0  ; CHECK-SDAG-NEXT:    ;;#ASMEND +; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s2 +; CHECK-SDAG-NEXT:    s_mov_b32 flat_scratch_lo, s13  ; CHECK-SDAG-NEXT:    s_waitcnt lgkmcnt(0) -; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s1 -; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s2 -; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s3 -; CHECK-SDAG-NEXT:    v_mov_b32_e32 v2, s0 -; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[2:3], v[0:1] +; CHECK-SDAG-NEXT:    v_mov_b32_e32 v0, s0 +; CHECK-SDAG-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8 +; CHECK-SDAG-NEXT:    v_mov_b32_e32 v1, s1 +; CHECK-SDAG-NEXT:    v_mov_b32_e32 v3, s3 +; CHECK-SDAG-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]  ; CHECK-SDAG-NEXT:    s_endpgm  ;  ; CHECK-GISEL-LABEL: test_readlane_copy_from_sgpr_f64: diff --git a/llvm/test/CodeGen/AMDGPU/schedule-amdgpu-trackers.ll b/llvm/test/CodeGen/AMDGPU/schedule-amdgpu-trackers.ll index c573253..48ed5c4 100644 --- a/llvm/test/CodeGen/AMDGPU/schedule-amdgpu-trackers.ll +++ b/llvm/test/CodeGen/AMDGPU/schedule-amdgpu-trackers.ll @@ -73,10 +73,10 @@ define amdgpu_kernel void @constant_zextload_v64i16_to_v64i32(ptr addrspace(1) %  }  ; CHECK-LABEL: {{^}}excess_soft_clause_reg_pressure: -; GFX908:    NumSgprs: 64 -; GFX908-GCNTRACKERS:    NumSgprs: 64 +; GFX908:    NumSgprs: 56 +; GFX908-GCNTRACKERS:    NumSgprs: 56  ; GFX908:    NumVgprs: 43 -; GFX908-GCNTRACKERS:    NumVgprs: 39 +; GFX908-GCNTRACKERS:    NumVgprs: 40  ; GFX908:    Occupancy: 5  ; GFX908-GCNTRACKERS:    Occupancy: 6 diff --git a/llvm/test/CodeGen/AMDGPU/spill-vgpr-to-agpr-update-regscavenger.ll b/llvm/test/CodeGen/AMDGPU/spill-vgpr-to-agpr-update-regscavenger.ll index 586579f..ef96944 100644 --- a/llvm/test/CodeGen/AMDGPU/spill-vgpr-to-agpr-update-regscavenger.ll +++ b/llvm/test/CodeGen/AMDGPU/spill-vgpr-to-agpr-update-regscavenger.ll @@ -20,38 +20,33 @@ define void @test() {  ; CHECK-NEXT:    ; in Loop: Header=BB0_1 Depth=1  ; CHECK-NEXT:  .LBB0_3: ; %bb.3  ; CHECK-NEXT:    ; in Loop: Header=BB0_1 Depth=1 -; 
-; CHECK-NEXT:    ; implicit-def: $sgpr4
-; CHECK-NEXT:    v_mov_b32_e32 v0, s4
-; CHECK-NEXT:    v_readfirstlane_b32 s6, v0
 ; CHECK-NEXT:    s_mov_b64 s[4:5], -1
-; CHECK-NEXT:    s_mov_b32 s7, 0
-; CHECK-NEXT:    s_cmp_eq_u32 s6, s7
 ; CHECK-NEXT:    ; implicit-def: $vgpr1 : SGPR spill to VGPR lane
 ; CHECK-NEXT:    v_writelane_b32 v1, s4, 0
 ; CHECK-NEXT:    v_writelane_b32 v1, s5, 1
-; CHECK-NEXT:    s_mov_b64 s[10:11], exec
-; CHECK-NEXT:    s_mov_b64 exec, -1
+; CHECK-NEXT:    s_or_saveexec_b64 s[8:9], -1
+; CHECK-NEXT:    s_nop 0
 ; CHECK-NEXT:    v_accvgpr_write_b32 a0, v1 ; Reload Reuse
-; CHECK-NEXT:    s_mov_b64 exec, s[10:11]
+; CHECK-NEXT:    s_mov_b64 exec, s[8:9]
 ; CHECK-NEXT:    s_cbranch_scc1 .LBB0_5
 ; CHECK-NEXT:  ; %bb.4: ; %bb.4
 ; CHECK-NEXT:    ; in Loop: Header=BB0_1 Depth=1
-; CHECK-NEXT:    s_or_saveexec_b64 s[10:11], -1
+; CHECK-NEXT:    s_or_saveexec_b64 s[8:9], -1
 ; CHECK-NEXT:    v_accvgpr_read_b32 v1, a0 ; Reload Reuse
-; CHECK-NEXT:    s_mov_b64 exec, s[10:11]
+; CHECK-NEXT:    s_mov_b64 exec, s[8:9]
 ; CHECK-NEXT:    s_mov_b64 s[4:5], 0
 ; CHECK-NEXT:    v_writelane_b32 v1, s4, 0
 ; CHECK-NEXT:    v_writelane_b32 v1, s5, 1
-; CHECK-NEXT:    s_or_saveexec_b64 s[10:11], -1
+; CHECK-NEXT:    s_or_saveexec_b64 s[8:9], -1
 ; CHECK-NEXT:    s_nop 0
 ; CHECK-NEXT:    v_accvgpr_write_b32 a0, v1 ; Reload Reuse
-; CHECK-NEXT:    s_mov_b64 exec, s[10:11]
+; CHECK-NEXT:    s_mov_b64 exec, s[8:9]
 ; CHECK-NEXT:  .LBB0_5: ; %Flow
 ; CHECK-NEXT:    ; in Loop: Header=BB0_1 Depth=1
-; CHECK-NEXT:    s_or_saveexec_b64 s[10:11], -1
+; CHECK-NEXT:    s_or_saveexec_b64 s[8:9], -1
 ; CHECK-NEXT:    s_nop 0
 ; CHECK-NEXT:    v_accvgpr_read_b32 v1, a0 ; Reload Reuse
-; CHECK-NEXT:    s_mov_b64 exec, s[10:11]
+; CHECK-NEXT:    s_mov_b64 exec, s[8:9]
 ; CHECK-NEXT:    v_readlane_b32 s4, v1, 0
 ; CHECK-NEXT:    v_readlane_b32 s5, v1, 1
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
diff --git a/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll b/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll
index 5aafb0f..364598f
--- a/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll
+++ b/llvm/test/CodeGen/AMDGPU/splitkit-getsubrangeformask.ll
@@ -31,8 +31,8 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x
   ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:sgpr_32 = COPY $sgpr10
   ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:sgpr_32 = COPY $sgpr8
   ; CHECK-NEXT:   undef [[S_LOAD_DWORDX2_IMM:%[0-9]+]].sub0_sub1:sgpr_128 = S_LOAD_DWORDX2_IMM [[COPY]], 232, 0 :: (invariant load (s64) from %ir.39, addrspace 4)
-  ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM undef %125:sgpr_128, 0, 0 :: (dereferenceable invariant load (s32))
-  ; CHECK-NEXT:   KILL undef %125:sgpr_128
+  ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM undef %117:sgpr_128, 0, 0 :: (dereferenceable invariant load (s32))
+  ; CHECK-NEXT:   KILL undef %117:sgpr_128
   ; CHECK-NEXT:   [[S_LSHL_B32_:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY5]], 4, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_LSHL_B32_1:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY4]], 4, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_LSHL_B32_2:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY3]], 4, implicit-def dead $scc
@@ -44,87 +44,85 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x
   ; CHECK-NEXT:   [[S_SUB_I32_1:%[0-9]+]]:sreg_32 = S_SUB_I32 [[S_BUFFER_LOAD_DWORD_IMM]], 30, implicit-def dead $scc
   ; CHECK-NEXT:   undef [[S_ADD_U32_:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], [[S_LSHL_B32_2]], implicit-def $scc
   ; CHECK-NEXT:   [[S_ADD_U32_:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_]], 16, 0 :: (invariant load (s128) from %ir.81, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_]], 16, 0 :: (invariant load (s128) from %ir.71, addrspace 4)
   ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM1:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM undef %74:sreg_64, 0, 0 :: (invariant load (s128) from `ptr addrspace(4) poison`, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM2:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_]], 64, 0 :: (invariant load (s128) from %ir.88, addrspace 4)
   ; CHECK-NEXT:   KILL undef %74:sreg_64
   ; CHECK-NEXT:   KILL [[S_ADD_U32_]].sub0, [[S_ADD_U32_]].sub1
   ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[S_LOAD_DWORDX4_IMM]], 0, 0 :: (dereferenceable invariant load (s32))
   ; CHECK-NEXT:   [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
   ; CHECK-NEXT:   undef [[S_MOV_B32_:%[0-9]+]].sub1:sgpr_128 = S_MOV_B32 0
-  ; CHECK-NEXT:   [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET undef %118:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
-  ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], undef %89:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+  ; CHECK-NEXT:   [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET undef %112:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+  ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], undef %87:sgpr_128, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
   ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM1]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
-  ; CHECK-NEXT:   KILL undef %89:sgpr_128
-  ; CHECK-NEXT:   KILL undef %118:sgpr_128
+  ; CHECK-NEXT:   KILL undef %112:sgpr_128
+  ; CHECK-NEXT:   KILL undef %87:sgpr_128
   ; CHECK-NEXT:   [[S_SUB_I32_2:%[0-9]+]]:sreg_32 = S_SUB_I32 [[S_BUFFER_LOAD_DWORD_IMM1]], 31, implicit-def dead $scc
   ; CHECK-NEXT:   undef [[S_ADD_U32_1:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], [[S_LSHL_B32_]], implicit-def $scc
   ; CHECK-NEXT:   [[S_ADD_U32_1:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
   ; CHECK-NEXT:   undef [[S_ADD_U32_2:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], [[S_LSHL_B32_1]], implicit-def $scc
   ; CHECK-NEXT:   [[S_ADD_U32_2:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
-  ; CHECK-NEXT:   undef [[S_ADD_U32_3:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], [[S_LSHL_B32_2]], implicit-def $scc
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM2:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_1]], 64, 0 :: (invariant load (s128) from %ir.87, addrspace 4)
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM3:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_2]], 64, 0 :: (invariant load (s128) from %ir.93, addrspace 4)
-  ; CHECK-NEXT:   KILL [[S_ADD_U32_1]].sub0, [[S_ADD_U32_1]].sub1
+  ; CHECK-NEXT:   [[S_ASHR_I32_3:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 undef %148:sreg_32, 31, implicit-def dead $scc
+  ; CHECK-NEXT:   undef [[S_ADD_U32_3:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], undef %148:sreg_32, implicit-def $scc
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM3:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_1]], 64, 0 :: (invariant load (s128) from %ir.77, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM4:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_2]], 64, 0 :: (invariant load (s128) from %ir.83, addrspace 4)
   ; CHECK-NEXT:   KILL [[S_ADD_U32_2]].sub0, [[S_ADD_U32_2]].sub1
-  ; CHECK-NEXT:   [[S_ADD_U32_3:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
-  ; CHECK-NEXT:   [[S_ASHR_I32_3:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 undef %169:sreg_32, 31, implicit-def dead $scc
-  ; CHECK-NEXT:   undef [[S_ADD_U32_4:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY6]], undef %169:sreg_32, implicit-def $scc
-  ; CHECK-NEXT:   [[S_ADD_U32_4:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_3]], implicit-def dead $scc, implicit $scc
-  ; CHECK-NEXT:   undef [[S_ADD_U32_5:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_]], implicit-def $scc
-  ; CHECK-NEXT:   [[S_ADD_U32_5:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
-  ; CHECK-NEXT:   undef [[S_ADD_U32_6:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_1]], implicit-def $scc
-  ; CHECK-NEXT:   [[S_ADD_U32_6:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
-  ; CHECK-NEXT:   undef [[S_ADD_U32_7:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, undef %169:sreg_32, implicit-def $scc
-  ; CHECK-NEXT:   [[S_ADD_U32_7:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_3]], implicit-def dead $scc, implicit $scc
-  ; CHECK-NEXT:   undef [[S_ADD_U32_8:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_2]], implicit-def $scc
-  ; CHECK-NEXT:   [[S_ADD_U32_8:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
-  ; CHECK-NEXT:   undef [[S_ADD_U32_9:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY8]], [[S_LSHL_B32_]], implicit-def $scc
-  ; CHECK-NEXT:   [[S_ADD_U32_9:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %48:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
-  ; CHECK-NEXT:   undef [[S_ADD_U32_10:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY9]], [[S_LSHL_B32_1]], implicit-def $scc
-  ; CHECK-NEXT:   [[S_ADD_U32_10:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %45:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
-  ; CHECK-NEXT:   undef [[S_ADD_U32_11:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY9]], [[S_LSHL_B32_2]], implicit-def $scc
-  ; CHECK-NEXT:   [[S_ADD_U32_11:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %45:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
+  ; CHECK-NEXT:   KILL [[S_ADD_U32_1]].sub0, [[S_ADD_U32_1]].sub1
+  ; CHECK-NEXT:   [[S_ADD_U32_3:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %54:sreg_32, [[S_ASHR_I32_3]], implicit-def dead $scc, implicit $scc
+  ; CHECK-NEXT:   undef [[S_ADD_U32_4:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_]], implicit-def $scc
+  ; CHECK-NEXT:   [[S_ADD_U32_4:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
+  ; CHECK-NEXT:   undef [[S_ADD_U32_5:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_1]], implicit-def $scc
+  ; CHECK-NEXT:   [[S_ADD_U32_5:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
+  ; CHECK-NEXT:   undef [[S_ADD_U32_6:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, undef %148:sreg_32, implicit-def $scc
+  ; CHECK-NEXT:   [[S_ADD_U32_6:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_3]], implicit-def dead $scc, implicit $scc
+  ; CHECK-NEXT:   undef [[S_ADD_U32_7:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY7]].sub0, [[S_LSHL_B32_2]], implicit-def $scc
+  ; CHECK-NEXT:   [[S_ADD_U32_7:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %51:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
+  ; CHECK-NEXT:   undef [[S_ADD_U32_8:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY8]], [[S_LSHL_B32_]], implicit-def $scc
+  ; CHECK-NEXT:   [[S_ADD_U32_8:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %48:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
+  ; CHECK-NEXT:   undef [[S_ADD_U32_9:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY9]], [[S_LSHL_B32_1]], implicit-def $scc
+  ; CHECK-NEXT:   [[S_ADD_U32_9:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %45:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
+  ; CHECK-NEXT:   undef [[S_ADD_U32_10:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY9]], [[S_LSHL_B32_2]], implicit-def $scc
+  ; CHECK-NEXT:   [[S_ADD_U32_10:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %45:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
   ; CHECK-NEXT:   [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_LSHL_B32_]], 16, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_ADD_I32_1:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_LSHL_B32_2]], 16, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_SGPR_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM [[S_MOV_B32_]], [[S_ADD_I32_]], 0, 0 :: (dereferenceable invariant load (s32))
-  ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_SGPR_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM [[S_MOV_B32_]], undef %302:sreg_32, 0, 0 :: (dereferenceable invariant load (s32))
+  ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_SGPR_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM [[S_MOV_B32_]], undef %279:sreg_32, 0, 0 :: (dereferenceable invariant load (s32))
   ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_SGPR_IMM2:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM [[S_MOV_B32_]], [[S_ADD_I32_1]], 0, 0 :: (dereferenceable invariant load (s32))
   ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_IMM2:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[S_MOV_B32_]], 16, 0 :: (dereferenceable invariant load (s32))
-  ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_SGPR_IMM3:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %357:sgpr_128, undef %358:sreg_32, 0, 0 :: (dereferenceable invariant load (s32))
-  ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_IMM3:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM undef %368:sgpr_128, 16, 0 :: (dereferenceable invariant load (s32))
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM4:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_3]], 64, 0 :: (invariant load (s128) from %ir.99, addrspace 4)
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM5:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_4]], 64, 0 :: (invariant load (s128) from %ir.107, addrspace 4)
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM6:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 0, 0 :: (invariant load (s128) from %ir.112, addrspace 4)
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM7:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_6]], 0, 0 :: (invariant load (s128) from %ir.117, addrspace 4)
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM8:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_7]], 0, 0 :: (invariant load (s128) from %ir.124, addrspace 4)
-  ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN2:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM2]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
-  ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_SGPR_IMM4:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %352:sgpr_128, [[S_ADD_I32_]], 0, 0 :: (dereferenceable invariant load (s32))
-  ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_SGPR_IMM5:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %363:sgpr_128, [[S_ADD_I32_1]], 0, 0 :: (dereferenceable invariant load (s32))
-  ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN3:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM3]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+  ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_SGPR_IMM3:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %334:sgpr_128, undef %335:sreg_32, 0, 0 :: (dereferenceable invariant load (s32))
+  ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_IMM3:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM undef %345:sgpr_128, 16, 0 :: (dereferenceable invariant load (s32))
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM5:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_3]], 64, 0 :: (invariant load (s128) from %ir.95, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM6:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_4]], 0, 0 :: (invariant load (s128) from %ir.100, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM7:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 0, 0 :: (invariant load (s128) from %ir.105, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM8:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_6]], 0, 0 :: (invariant load (s128) from %ir.112, addrspace 4)
+  ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_SGPR_IMM4:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %329:sgpr_128, [[S_ADD_I32_]], 0, 0 :: (dereferenceable invariant load (s32))
+  ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_SGPR_IMM5:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %340:sgpr_128, [[S_ADD_I32_1]], 0, 0 :: (dereferenceable invariant load (s32))
+  ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN2:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM3]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+  ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN3:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM4]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
   ; CHECK-NEXT:   [[S_ADD_I32_2:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM]], -98, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_ADD_I32_3:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM1]], -114, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_ADD_I32_4:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM2]], -130, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_ADD_I32_5:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM2]], -178, implicit-def dead $scc
-  ; CHECK-NEXT:   undef [[S_ADD_U32_12:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY10]], [[S_LSHL_B32_]], implicit-def $scc
-  ; CHECK-NEXT:   [[S_ADD_U32_12:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %42:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
-  ; CHECK-NEXT:   undef [[S_ADD_U32_13:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_]], implicit-def $scc
-  ; CHECK-NEXT:   [[S_ADD_U32_13:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
-  ; CHECK-NEXT:   undef [[S_ADD_U32_14:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_1]], implicit-def $scc
-  ; CHECK-NEXT:   [[S_ADD_U32_14:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
-  ; CHECK-NEXT:   undef [[S_ADD_U32_15:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_2]], implicit-def $scc
-  ; CHECK-NEXT:   [[S_ADD_U32_15:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
+  ; CHECK-NEXT:   undef [[S_ADD_U32_11:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY10]], [[S_LSHL_B32_]], implicit-def $scc
+  ; CHECK-NEXT:   [[S_ADD_U32_11:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %42:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
+  ; CHECK-NEXT:   undef [[S_ADD_U32_12:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_]], implicit-def $scc
+  ; CHECK-NEXT:   [[S_ADD_U32_12:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
+  ; CHECK-NEXT:   undef [[S_ADD_U32_13:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_1]], implicit-def $scc
+  ; CHECK-NEXT:   [[S_ADD_U32_13:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
+  ; CHECK-NEXT:   undef [[S_ADD_U32_14:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY11]], [[S_LSHL_B32_2]], implicit-def $scc
+  ; CHECK-NEXT:   [[S_ADD_U32_14:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %39:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
   ; CHECK-NEXT:   [[S_LSHL_B32_3:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY12]], 4, implicit-def dead $scc
-  ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN4:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM4]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+  ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN4:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM2]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
   ; CHECK-NEXT:   [[S_ADD_I32_6:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_LSHL_B32_3]], 16, implicit-def dead $scc
-  ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_SGPR_IMM6:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %384:sgpr_128, [[S_ADD_I32_6]], 0, 0 :: (dereferenceable invariant load (s32))
+  ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_SGPR_IMM6:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_SGPR_IMM undef %361:sgpr_128, [[S_ADD_I32_6]], 0, 0 :: (dereferenceable invariant load (s32))
   ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN5:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM5]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM9:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 224, 0 :: (invariant load (s128) from %ir.129, addrspace 4)
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM10:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[COPY7]], 224, 0 :: (invariant load (s128) from %ir.145, addrspace 4)
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM11:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 576, 0 :: (invariant load (s128) from %ir.150, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM9:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_4]], 224, 0 :: (invariant load (s128) from %ir.117, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM10:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[COPY7]], 224, 0 :: (invariant load (s128) from %ir.133, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM11:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_4]], 576, 0 :: (invariant load (s128) from %ir.138, addrspace 4)
   ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN6:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM6]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM12:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_6]], 224, 0 :: (invariant load (s128) from %ir.134, addrspace 4)
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM13:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_7]], 576, 0 :: (invariant load (s128) from %ir.162, addrspace 4)
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM14:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_8]], 224, 0 :: (invariant load (s128) from %ir.140, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM12:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_5]], 224, 0 :: (invariant load (s128) from %ir.122, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM13:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_6]], 576, 0 :: (invariant load (s128) from %ir.150, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM14:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_7]], 224, 0 :: (invariant load (s128) from %ir.128, addrspace 4)
   ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN7:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM7]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
   ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN8:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM8]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
   ; CHECK-NEXT:   [[S_ADD_I32_7:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM4]], -217, implicit-def dead $scc
@@ -135,49 +133,49 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x
   ; CHECK-NEXT:   [[S_ADD_I32_12:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -329, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_ADD_I32_13:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -345, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_ADD_I32_14:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM6]], -441, implicit-def dead $scc
-  ; CHECK-NEXT:   undef [[S_ADD_U32_16:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY2]], [[S_LSHL_B32_2]], implicit-def $scc
-  ; CHECK-NEXT:   [[S_ADD_U32_16:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %36:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
+  ; CHECK-NEXT:   undef [[S_ADD_U32_15:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY2]], [[S_LSHL_B32_2]], implicit-def $scc
+  ; CHECK-NEXT:   [[S_ADD_U32_15:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %36:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
   ; CHECK-NEXT:   [[S_LSHL_B32_4:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY13]], 4, implicit-def dead $scc
   ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN9:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM9]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
   ; CHECK-NEXT:   [[S_ASHR_I32_4:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_4]], 31, implicit-def dead $scc
-  ; CHECK-NEXT:   undef [[S_ADD_U32_17:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY2]], [[S_LSHL_B32_4]], implicit-def $scc
-  ; CHECK-NEXT:   [[S_ADD_U32_17:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %36:sreg_32, [[S_ASHR_I32_4]], implicit-def dead $scc, implicit $scc
+  ; CHECK-NEXT:   undef [[S_ADD_U32_16:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY2]], [[S_LSHL_B32_4]], implicit-def $scc
+  ; CHECK-NEXT:   [[S_ADD_U32_16:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %36:sreg_32, [[S_ASHR_I32_4]], implicit-def dead $scc, implicit $scc
   ; CHECK-NEXT:   [[S_LSHL_B32_5:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY5]], 3, implicit-def dead $scc
   ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN10:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM12]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
   ; CHECK-NEXT:   [[S_ASHR_I32_5:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_5]], 31, implicit-def dead $scc
-  ; CHECK-NEXT:   undef [[S_ADD_U32_18:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_5]], implicit-def $scc
-  ; CHECK-NEXT:   [[S_ADD_U32_18:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_5]], implicit-def dead $scc, implicit $scc
-  ; CHECK-NEXT:   [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_ADD_U32_18]], 168, 0 :: (invariant load (s32) from %ir.273, align 8, addrspace 4)
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM15:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_8]], 576, 0 :: (invariant load (s128) from %ir.157, addrspace 4)
+  ; CHECK-NEXT:   undef [[S_ADD_U32_17:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_5]], implicit-def $scc
+  ; CHECK-NEXT:   [[S_ADD_U32_17:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_5]], implicit-def dead $scc, implicit $scc
+  ; CHECK-NEXT:   [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_ADD_U32_17]], 168, 0 :: (invariant load (s32) from %ir.260, align 8, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM15:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_7]], 576, 0 :: (invariant load (s128) from %ir.145, addrspace 4)
   ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN11:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM14]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
   ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN12:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM10]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
   ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN13:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM11]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
   ; CHECK-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]].sub3:sgpr_128 = S_MOV_B32 553734060
   ; CHECK-NEXT:   [[S_LOAD_DWORDX2_IMM:%[0-9]+]].sub2:sgpr_128 = S_MOV_B32 -1
   ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM]]
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM16:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_9]], 0, 0 :: (invariant load (s128) from %ir.170, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM16:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_8]], 0, 0 :: (invariant load (s128) from %ir.158, addrspace 4)
   ; CHECK-NEXT:   [[COPY15:%[0-9]+]].sub1:sgpr_128 = COPY [[S_MOV_B32_]].sub1
   ; CHECK-NEXT:   [[COPY15:%[0-9]+]].sub0:sgpr_128 = COPY [[S_LOAD_DWORD_IMM]]
   ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_IMM4:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[COPY15]], 0, 0 :: (dereferenceable invariant load (s32))
   ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN14:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM15]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
   ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN15:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM13]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM17:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_10]], 0, 0 :: (invariant load (s128) from %ir.178, addrspace 4)
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM18:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_11]], 0, 0 :: (invariant load (s128) from %ir.183, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM17:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_9]], 0, 0 :: (invariant load (s128) from %ir.166, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM18:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_10]], 0, 0 :: (invariant load (s128) from %ir.171, addrspace 4)
   ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN16:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM16]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
   ; CHECK-NEXT:   [[S_LSHL_B32_6:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY4]], 3, implicit-def dead $scc
   ; CHECK-NEXT:   [[BUFFER_LOAD_DWORD_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[S_LOAD_DWORDX4_IMM1]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
   ; CHECK-NEXT:   [[S_ASHR_I32_6:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_6]], 31, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_ADD_I32_15:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM4]], -467, implicit-def dead $scc
-  ; CHECK-NEXT:   undef [[S_ADD_U32_19:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_6]], implicit-def $scc
-  ; CHECK-NEXT:   [[S_ADD_U32_19:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_6]], implicit-def dead $scc, implicit $scc
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[S_ADD_U32_19]], 168, 0 :: (invariant load (s64) from %ir.282, addrspace 4)
+  ; CHECK-NEXT:   undef [[S_ADD_U32_18:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_6]], implicit-def $scc
+  ; CHECK-NEXT:   [[S_ADD_U32_18:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_6]], implicit-def dead $scc, implicit $scc
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX2_IMM1:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[S_ADD_U32_18]], 168, 0 :: (invariant load (s64) from %ir.269, addrspace 4)
   ; CHECK-NEXT:   [[BUFFER_LOAD_DWORD_OFFSET2:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[S_LOAD_DWORDX4_IMM17]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
   ; CHECK-NEXT:   [[BUFFER_LOAD_DWORD_OFFSET3:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET [[S_LOAD_DWORDX4_IMM18]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM19:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_12]], 0, 0 :: (invariant load (s128) from %ir.205, addrspace 4)
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM20:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_13]], 0, 0 :: (invariant load (s128) from %ir.211, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM19:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_11]], 0, 0 :: (invariant load (s128) from %ir.193, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM20:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_12]], 0, 0 :: (invariant load (s128) from %ir.199, addrspace 4)
   ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM]]
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM21:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_14]], 0, 0 :: (invariant load (s128) from %ir.216, addrspace 4)
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM22:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_15]], 0, 0 :: (invariant load (s128) from %ir.221, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM21:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_13]], 0, 0 :: (invariant load (s128) from %ir.204, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM22:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_14]], 0, 0 :: (invariant load (s128) from %ir.209, addrspace 4)
   ; CHECK-NEXT:   [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_LOAD_DWORDX2_IMM1]].sub1, 65535, implicit-def dead $scc
   ; CHECK-NEXT:   [[COPY16:%[0-9]+]].sub0:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM1]].sub0
   ; CHECK-NEXT:   [[COPY16:%[0-9]+]].sub1:sgpr_128 = COPY [[S_AND_B32_]]
@@ -189,30 +187,30 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x
   ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN20:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM22]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
   ; CHECK-NEXT:   [[S_ASHR_I32_7:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_7]], 31, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_ADD_I32_16:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM5]], -468, implicit-def dead $scc
-  ; CHECK-NEXT:   undef [[S_ADD_U32_20:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_7]], implicit-def $scc
-  ; CHECK-NEXT:   [[S_ADD_U32_20:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_7]], implicit-def dead $scc, implicit $scc
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX2_IMM2:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[S_ADD_U32_20]], 168, 0 :: (invariant load (s64) from %ir.293, addrspace 4)
+  ; CHECK-NEXT:   undef [[S_ADD_U32_19:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_7]], implicit-def $scc
+  ; CHECK-NEXT:   [[S_ADD_U32_19:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_7]], implicit-def dead $scc, implicit $scc
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX2_IMM2:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[S_ADD_U32_19]], 168, 0 :: (invariant load (s64) from %ir.280, addrspace 4)
   ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM]]
   ; CHECK-NEXT:   [[S_AND_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_LOAD_DWORDX2_IMM2]].sub1, 65535, implicit-def dead $scc
   ; CHECK-NEXT:   [[COPY17:%[0-9]+]].sub0:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM2]].sub0
   ; CHECK-NEXT:   [[COPY17:%[0-9]+]].sub1:sgpr_128 = COPY [[S_AND_B32_1]]
   ; CHECK-NEXT:   [[S_BUFFER_LOAD_DWORD_IMM6:%[0-9]+]]:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM [[COPY17]], 0, 0 :: (dereferenceable invariant load (s32))
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM23:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_16]], 160, 0 :: (invariant load (s128) from %ir.256, addrspace 4)
-  ; CHECK-NEXT:   [[S_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM undef %470:sreg_64, 0, 0 :: (invariant load (s32) from `ptr addrspace(4) poison`, addrspace 4)
-  ; CHECK-NEXT:   KILL [[S_ADD_U32_16]].sub0, [[S_ADD_U32_16]].sub1
-  ; CHECK-NEXT:   KILL undef %470:sreg_64
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM23:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_15]], 160, 0 :: (invariant load (s128) from %ir.244, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM undef %443:sreg_64, 0, 0 :: (invariant load (s32) from `ptr addrspace(4) poison`, addrspace 4)
+  ; CHECK-NEXT:   KILL [[S_ADD_U32_15]].sub0, [[S_ADD_U32_15]].sub1
   ; CHECK-NEXT:   KILL [[COPY17]].sub0_sub1_sub2, [[COPY17]].sub3
+  ; CHECK-NEXT:   KILL undef %443:sreg_64
   ; CHECK-NEXT:   [[S_LSHL_B32_8:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[COPY14]], 3, implicit-def dead $scc
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM24:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_17]], 160, 0 :: (invariant load (s128) from %ir.265, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM24:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_16]], 160, 0 :: (invariant load (s128) from %ir.252, addrspace 4)
   ; CHECK-NEXT:   [[S_ASHR_I32_8:%[0-9]+]]:sreg_32_xm0 = S_ASHR_I32 [[S_LSHL_B32_8]], 31, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_ADD_I32_17:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM6]], -469, implicit-def dead $scc
-  ; CHECK-NEXT:   undef [[S_ADD_U32_21:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_8]], implicit-def $scc
-  ; CHECK-NEXT:   [[S_ADD_U32_21:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_8]], implicit-def dead $scc, implicit $scc
-  ; CHECK-NEXT:   [[S_LOAD_DWORD_IMM2:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_ADD_U32_21]], 168, 0 :: (invariant load (s32) from %ir.305, align 8, addrspace 4)
+  ; CHECK-NEXT:   undef [[S_ADD_U32_20:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY]].sub0, [[S_LSHL_B32_8]], implicit-def $scc
+  ; CHECK-NEXT:   [[S_ADD_U32_20:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %57:sreg_32, [[S_ASHR_I32_8]], implicit-def dead $scc, implicit $scc
+  ; CHECK-NEXT:   [[S_LOAD_DWORD_IMM2:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[S_ADD_U32_20]], 168, 0 :: (invariant load (s32) from %ir.291, align 8, addrspace 4)
   ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN21:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM23]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
   ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN22:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM24]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
-  ; CHECK-NEXT:   KILL [[S_LOAD_DWORDX4_IMM24]]
   ; CHECK-NEXT:   KILL [[S_LOAD_DWORDX4_IMM23]]
+  ; CHECK-NEXT:   KILL [[S_LOAD_DWORDX4_IMM24]]
   ; CHECK-NEXT:   [[S_AND_B32_2:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_LOAD_DWORD_IMM1]], 65535, implicit-def dead $scc
   ; CHECK-NEXT:   [[COPY18:%[0-9]+]]:sgpr_128 = COPY [[S_LOAD_DWORDX2_IMM]]
   ; CHECK-NEXT:   [[COPY18:%[0-9]+]].sub1:sgpr_128 = COPY [[S_AND_B32_2]]
@@ -224,22 +222,22 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x
   ; CHECK-NEXT:   [[S_ADD_I32_21:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -507, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_ADD_I32_22:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_SGPR_IMM3]], -539, implicit-def dead $scc
   ; CHECK-NEXT:   [[S_ADD_I32_23:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM7]], -473, implicit-def dead $scc
-  ; CHECK-NEXT:   undef [[S_ADD_U32_22:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_]], implicit-def $scc
-  ; CHECK-NEXT:   [[S_ADD_U32_22:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM25:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_22]], 96, 0 :: (invariant load (s128) from %ir.323, addrspace 4)
-  ; CHECK-NEXT:   undef [[S_ADD_U32_23:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_1]], implicit-def $scc
-  ; CHECK-NEXT:   [[S_ADD_U32_23:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM26:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_23]], 96, 0 :: (invariant load (s128) from %ir.329, addrspace 4)
-  ; CHECK-NEXT:   undef [[S_ADD_U32_24:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_2]], implicit-def $scc
-  ; CHECK-NEXT:   [[S_ADD_U32_24:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM27:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_24]], 96, 0 :: (invariant load (s128) from %ir.335, addrspace 4)
+  ; CHECK-NEXT:   undef [[S_ADD_U32_21:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_]], implicit-def $scc
+  ; CHECK-NEXT:   [[S_ADD_U32_21:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_]], implicit-def dead $scc, implicit $scc
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM25:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_21]], 96, 0 :: (invariant load (s128) from %ir.309, addrspace 4)
+  ; CHECK-NEXT:   undef [[S_ADD_U32_22:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_1]], implicit-def $scc
+  ; CHECK-NEXT:   [[S_ADD_U32_22:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_1]], implicit-def dead $scc, implicit $scc
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM26:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_22]], 96, 0 :: (invariant load (s128) from %ir.315, addrspace 4)
+  ; CHECK-NEXT:   undef [[S_ADD_U32_23:%[0-9]+]].sub0:sreg_64 = S_ADD_U32 [[COPY1]], [[S_LSHL_B32_2]], implicit-def $scc
+  ; CHECK-NEXT:   [[S_ADD_U32_23:%[0-9]+]].sub1:sreg_64 = S_ADDC_U32 undef %33:sreg_32, [[S_ASHR_I32_2]], implicit-def dead $scc, implicit $scc
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX4_IMM27:%[0-9]+]]:sgpr_128 = S_LOAD_DWORDX4_IMM [[S_ADD_U32_23]], 96, 0 :: (invariant load (s128) from %ir.321, addrspace 4)
   ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN23:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM25]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
   ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN24:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM26]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
   ; CHECK-NEXT:   [[BUFFER_LOAD_FORMAT_X_IDXEN25:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_FORMAT_X_IDXEN [[V_MOV_B32_e32_]], [[S_LOAD_DWORDX4_IMM27]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
-  ; CHECK-NEXT:   KILL [[S_LOAD_DWORDX4_IMM27]]
   ; CHECK-NEXT:   KILL [[S_LOAD_DWORDX4_IMM25]]
-  ; CHECK-NEXT:   KILL [[V_MOV_B32_e32_]]
   ; CHECK-NEXT:   KILL [[S_LOAD_DWORDX4_IMM26]]
+  ; CHECK-NEXT:   KILL [[V_MOV_B32_e32_]]
+  ; CHECK-NEXT:   KILL [[S_LOAD_DWORDX4_IMM27]]
   ; CHECK-NEXT:   [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 -2, [[BUFFER_LOAD_FORMAT_X_IDXEN]], 0, implicit $exec
   ; CHECK-NEXT:   [[V_ADD_U32_e64_1:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 -1, [[BUFFER_LOAD_FORMAT_X_IDXEN1]], 0, implicit $exec
   ; CHECK-NEXT:   [[V_ADD_U32_e64_2:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 -3, [[BUFFER_LOAD_FORMAT_X_IDXEN]], 0, implicit $exec
@@ -351,13 +349,13 @@ define amdgpu_gs void @_amdgpu_gs_main(i32 inreg %primShaderTableAddrLow, <31 x
   ; CHECK-NEXT:   [[V_OR_B32_e64_64:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[V_OR_B32_e64_63]], [[V_ADD_U32_e64_28]], implicit $exec
   ; CHECK-NEXT:   [[V_ADD_U32_e64_30:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 -593, [[BUFFER_LOAD_FORMAT_X_IDXEN]], 0, implicit $exec
   ; CHECK-NEXT:   [[V_OR_B32_e64_65:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[V_OR_B32_e64_64]], [[V_ADD_U32_e64_29]], implicit $exec
-  ; CHECK-NEXT:   [[S_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM undef %543:sreg_64, 0, 0 :: (invariant load (s256) from `ptr addrspace(4) poison`, addrspace 4)
+  ; CHECK-NEXT:   [[S_LOAD_DWORDX8_IMM:%[0-9]+]]:sgpr_256 = S_LOAD_DWORDX8_IMM undef %516:sreg_64, 0, 0 :: (invariant load (s256) from `ptr addrspace(4) poison`, addrspace 4)
   ; CHECK-NEXT:   [[V_OR_B32_e64_66:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[V_OR_B32_e64_65]], [[V_ADD_U32_e64_30]], implicit $exec
   ; CHECK-NEXT:   [[S_ADD_I32_24:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_BUFFER_LOAD_DWORD_IMM8]], -594, implicit-def dead $scc
   ; CHECK-NEXT:   [[V_OR_B32_e64_67:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[S_ADD_I32_24]], [[V_OR_B32_e64_66]], implicit $exec
   ; CHECK-NEXT:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 0, [[V_OR_B32_e64_67]], implicit $exec
   ; CHECK-NEXT:   undef [[V_CNDMASK_B32_e64_:%[0-9]+]].sub3:vreg_128 = V_CNDMASK_B32_e64 0, 0, 0, 1, [[V_CMP_EQ_U32_e64_]], implicit $exec
-  ; CHECK-NEXT:   IMAGE_STORE_V4_V2_nsa_gfx10 [[V_CNDMASK_B32_e64_]], undef %557:vgpr_32, undef %559:vgpr_32, [[S_LOAD_DWORDX8_IMM]], 15, 1, -1, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store (s128), addrspace 8)
+  ; CHECK-NEXT:   IMAGE_STORE_V4_V2_nsa_gfx10 [[V_CNDMASK_B32_e64_]], undef %530:vgpr_32, undef %532:vgpr_32, [[S_LOAD_DWORDX8_IMM]], 15, 1, -1, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable store (s128), addrspace 8)
   ; CHECK-NEXT:   S_ENDPGM 0
 .expVert:
   %0 = extractelement <31 x i32> %userData, i64 2
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll b/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
index db49339..9c16b3c
--- a/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
+++ b/llvm/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
@@ -22,8 +22,6 @@
 ; GFX9-DAG: s_mov_b32 s[[DESC3:[0-9]+]], 0xe00000
 
 ; OFFREG is offset system SGPR
-; GCN: buffer_store_dword {{v[0-9]+}}, off, s[[[DESC0]]:[[DESC3]]], 0 offset:{{[0-9]+}} ; 4-byte Folded Spill
-; GCN: buffer_load_dword v{{[0-9]+}}, off, s[[[DESC0]]:[[DESC3]]], 0 offset:{{[0-9]+}} ; 4-byte Folded Reload
 ; GCN: NumVgprs: 256
 ; GCN: ScratchSize: 640
 
diff --git a/llvm/test/CodeGen/AMDGPU/wqm.ll b/llvm/test/CodeGen/AMDGPU/wqm.ll
index ad8dcd3..21f0c00
--- a/llvm/test/CodeGen/AMDGPU/wqm.ll
+++ b/llvm/test/CodeGen/AMDGPU/wqm.ll
@@ -3477,13 +3477,10 @@ define amdgpu_gs void @wqm_init_exec_wwm() {
 ; GFX9-W64-NEXT:    s_mov_b64 exec, 0
 ; GFX9-W64-NEXT:    s_mov_b32 s1, 0
 ; GFX9-W64-NEXT:    s_mov_b32 s0, s1
-; GFX9-W64-NEXT:    s_cmp_lg_u64 exec, 0
-; GFX9-W64-NEXT:    s_cselect_b64 s[2:3], -1, 0
-; GFX9-W64-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GFX9-W64-NEXT:    s_cmp_eq_u64 s[0:1], 0
 ; GFX9-W64-NEXT:    s_cselect_b64 s[0:1], -1, 0
-; GFX9-W64-NEXT:    s_xor_b64 s[0:1], s[2:3], s[0:1]
-; GFX9-W64-NEXT:    v_cndmask_b32_e64 v0, 0, 1.0, s[0:1]
-; GFX9-W64-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-W64-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-W64-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s[0:1]
 ; GFX9-W64-NEXT:    exp mrt0 off, off, off, off
 ; GFX9-W64-NEXT:    s_endpgm
 ;
@@ -3491,14 +3488,11 @@ define amdgpu_gs void @wqm_init_exec_wwm() {
 ; GFX10-W32:       ; %bb.0:
 ; GFX10-W32-NEXT:    s_mov_b32 exec_lo, 0
 ; GFX10-W32-NEXT:    s_mov_b32 s1, 0
-; GFX10-W32-NEXT:    s_cmp_lg_u64 exec, 0
+; GFX10-W32-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX10-W32-NEXT:    s_mov_b32 s0, s1
-; GFX10-W32-NEXT:    s_cselect_b32 s2, -1, 0
-; GFX10-W32-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX10-W32-NEXT:    v_mov_b32_e32 v1, 0
+; GFX10-W32-NEXT:    s_cmp_eq_u64 s[0:1], 0
 ; GFX10-W32-NEXT:    s_cselect_b32 s0, -1, 0
-; GFX10-W32-NEXT:    s_xor_b32 s0, s2, s0
-; GFX10-W32-NEXT:    v_cndmask_b32_e64 v0, 0, 1.0, s0
+; GFX10-W32-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s0
 ; GFX10-W32-NEXT:    exp mrt0 off, off, off, off
 ; GFX10-W32-NEXT:    s_endpgm
   call void @llvm.amdgcn.init.exec(i64 0)
