Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll')
-rw-r--r--   llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll   612
1 file changed, 612 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll
new file mode 100644
index 0000000..e117200
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/add.ll
@@ -0,0 +1,612 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=hawaii < %s | FileCheck -check-prefix=GFX7 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=fiji < %s | FileCheck -check-prefix=GFX8 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX11 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefix=GFX12 %s
+
+define i16 @s_add_i16(i16 inreg %a, i16 inreg %b) {
+; GFX7-LABEL: s_add_i16:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_add_i32 s16, s16, s17
+; GFX7-NEXT:    v_mov_b32_e32 v0, s16
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_add_i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    s_add_i32 s16, s16, s17
+; GFX9-NEXT:    v_mov_b32_e32 v0, s16
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_add_i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_add_i32 s16, s16, s17
+; GFX8-NEXT:    v_mov_b32_e32 v0, s16
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: s_add_i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_add_i32 s16, s16, s17
+; GFX10-NEXT:    v_mov_b32_e32 v0, s16
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_add_i16:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    s_add_i32 s0, s0, s1
+; GFX11-NEXT:    v_mov_b32_e32 v0, s0
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: s_add_i16:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_add_co_i32 s0, s0, s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    v_mov_b32_e32 v0, s0
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %c = add i16 %a, %b
+  ret i16 %c
+}
+
+define i16 @v_add_i16(i16 %a, i16 %b) {
+; GFX7-LABEL: v_add_i16:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_add_i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_add_i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_add_i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_add_nc_u16 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_add_i16:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_add_nc_u16 v0.l, v0.l, v1.l
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_add_i16:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_add_nc_u16 v0, v0, v1
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %c = add i16 %a, %b
+  ret i16 %c
+}
+
+define i32 @s_add_i32(i32 inreg %a, i32 inreg %b) {
+; GFX7-LABEL: s_add_i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_add_i32 s16, s16, s17
+; GFX7-NEXT:    v_mov_b32_e32 v0, s16
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_add_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    s_add_i32 s16, s16, s17
+; GFX9-NEXT:    v_mov_b32_e32 v0, s16
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_add_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_add_i32 s16, s16, s17
+; GFX8-NEXT:    v_mov_b32_e32 v0, s16
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: s_add_i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_add_i32 s16, s16, s17
+; GFX10-NEXT:    v_mov_b32_e32 v0, s16
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_add_i32:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    s_add_i32 s0, s0, s1
+; GFX11-NEXT:    v_mov_b32_e32 v0, s0
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: s_add_i32:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_add_co_i32 s0, s0, s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    v_mov_b32_e32 v0, s0
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %c = add i32 %a, %b
+  ret i32 %c
+}
+
+define i32 @v_add_i32(i32 %a, i32 %b) {
+; GFX7-LABEL: v_add_i32:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_add_i32:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_add_i32:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_add_i32:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_add_nc_u32_e32 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_add_i32:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_add_nc_u32_e32 v0, v0, v1
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_add_i32:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_add_nc_u32_e32 v0, v0, v1
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %c = add i32 %a, %b
+  ret i32 %c
+}
+
+define <2 x i16> @s_add_v2i16(<2 x i16> inreg %a, <2 x i16> inreg %b) {
+; GFX7-LABEL: s_add_v2i16:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_add_i32 s16, s16, s18
+; GFX7-NEXT:    s_add_i32 s17, s17, s19
+; GFX7-NEXT:    v_mov_b32_e32 v0, s16
+; GFX7-NEXT:    v_mov_b32_e32 v1, s17
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_add_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    s_lshr_b32 s4, s16, 16
+; GFX9-NEXT:    s_lshr_b32 s5, s17, 16
+; GFX9-NEXT:    s_add_i32 s16, s16, s17
+; GFX9-NEXT:    s_add_i32 s4, s4, s5
+; GFX9-NEXT:    s_pack_ll_b32_b16 s4, s16, s4
+; GFX9-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_add_v2i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_lshr_b32 s4, s16, 16
+; GFX8-NEXT:    s_lshr_b32 s5, s17, 16
+; GFX8-NEXT:    s_add_i32 s4, s4, s5
+; GFX8-NEXT:    s_add_i32 s16, s16, s17
+; GFX8-NEXT:    s_and_b32 s4, 0xffff, s4
+; GFX8-NEXT:    s_and_b32 s5, 0xffff, s16
+; GFX8-NEXT:    s_lshl_b32 s4, s4, 16
+; GFX8-NEXT:    s_or_b32 s4, s5, s4
+; GFX8-NEXT:    v_mov_b32_e32 v0, s4
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: s_add_v2i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_lshr_b32 s4, s16, 16
+; GFX10-NEXT:    s_lshr_b32 s5, s17, 16
+; GFX10-NEXT:    s_add_i32 s16, s16, s17
+; GFX10-NEXT:    s_add_i32 s4, s4, s5
+; GFX10-NEXT:    s_pack_ll_b32_b16 s4, s16, s4
+; GFX10-NEXT:    v_mov_b32_e32 v0, s4
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_add_v2i16:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    s_lshr_b32 s2, s0, 16
+; GFX11-NEXT:    s_lshr_b32 s3, s1, 16
+; GFX11-NEXT:    s_add_i32 s0, s0, s1
+; GFX11-NEXT:    s_add_i32 s2, s2, s3
+; GFX11-NEXT:    s_pack_ll_b32_b16 s0, s0, s2
+; GFX11-NEXT:    v_mov_b32_e32 v0, s0
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: s_add_v2i16:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_lshr_b32 s2, s0, 16
+; GFX12-NEXT:    s_lshr_b32 s3, s1, 16
+; GFX12-NEXT:    s_add_co_i32 s0, s0, s1
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_add_co_i32 s2, s2, s3
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    s_pack_ll_b32_b16 s0, s0, s2
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    v_mov_b32_e32 v0, s0
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %c = add <2 x i16> %a, %b
+  ret <2 x i16> %c
+}
+
+define <2 x i16> @v_add_v2i16(<2 x i16> %a, <2 x i16> %b) {
+; GFX7-LABEL: v_add_v2i16:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_add_v2i16:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_pk_add_u16 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_add_v2i16:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u16_e32 v2, v0, v1
+; GFX8-NEXT:    v_add_u16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT:    v_or_b32_e32 v0, v2, v0
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_add_v2i16:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_pk_add_u16 v0, v0, v1
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_add_v2i16:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_pk_add_u16 v0, v0, v1
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_add_v2i16:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_pk_add_u16 v0, v0, v1
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %c = add <2 x i16> %a, %b
+  ret <2 x i16> %c
+}
+
+define i64 @s_add_i64(i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_add_i64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_add_u32 s4, s16, s18
+; GFX7-NEXT:    s_addc_u32 s5, s17, s19
+; GFX7-NEXT:    v_mov_b32_e32 v0, s4
+; GFX7-NEXT:    v_mov_b32_e32 v1, s5
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_add_i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    s_add_u32 s4, s16, s18
+; GFX9-NEXT:    s_addc_u32 s5, s17, s19
+; GFX9-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_add_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_add_u32 s4, s16, s18
+; GFX8-NEXT:    s_addc_u32 s5, s17, s19
+; GFX8-NEXT:    v_mov_b32_e32 v0, s4
+; GFX8-NEXT:    v_mov_b32_e32 v1, s5
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: s_add_i64:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_add_u32 s4, s16, s18
+; GFX10-NEXT:    s_addc_u32 s5, s17, s19
+; GFX10-NEXT:    v_mov_b32_e32 v0, s4
+; GFX10-NEXT:    v_mov_b32_e32 v1, s5
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_add_i64:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    s_add_u32 s0, s0, s2
+; GFX11-NEXT:    s_addc_u32 s1, s1, s3
+; GFX11-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: s_add_i64:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_add_nc_u64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %c = add i64 %a, %b
+  ret i64 %c
+}
+
+define i64 @v_add_i64(i64 %a, i64 %b) {
+; GFX7-LABEL: v_add_i64:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_add_i64:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_add_i64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_add_i64:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_add_i64:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, null, v1, v3, vcc_lo
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_add_i64:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX12-NEXT:    s_wait_alu 0xfffd
+; GFX12-NEXT:    v_add_co_ci_u32_e64 v1, null, v1, v3, vcc_lo
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %c = add i64 %a, %b
+  ret i64 %c
+}
+
+define void @s_uaddo_uadde(i64 inreg %a, i64 inreg %b, ptr addrspace(1) %res, ptr addrspace(1) %carry) {
+; GFX7-LABEL: s_uaddo_uadde:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    s_add_u32 s4, s16, s18
+; GFX7-NEXT:    s_addc_u32 s5, s17, s19
+; GFX7-NEXT:    v_mov_b32_e32 v4, s4
+; GFX7-NEXT:    s_mov_b32 s6, 0
+; GFX7-NEXT:    s_cselect_b32 s8, 1, 0
+; GFX7-NEXT:    v_mov_b32_e32 v5, s5
+; GFX7-NEXT:    s_mov_b32 s7, 0xf000
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:    buffer_store_dwordx2 v[4:5], v[0:1], s[4:7], 0 addr64
+; GFX7-NEXT:    v_mov_b32_e32 v0, s8
+; GFX7-NEXT:    buffer_store_dword v0, v[2:3], s[4:7], 0 addr64
+; GFX7-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_uaddo_uadde:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    s_add_u32 s4, s16, s18
+; GFX9-NEXT:    s_addc_u32 s5, s17, s19
+; GFX9-NEXT:    v_mov_b32_e32 v4, s4
+; GFX9-NEXT:    s_cselect_b32 s6, 1, 0
+; GFX9-NEXT:    v_mov_b32_e32 v5, s5
+; GFX9-NEXT:    global_store_dwordx2 v[0:1], v[4:5], off
+; GFX9-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-NEXT:    global_store_dword v[2:3], v0, off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: s_uaddo_uadde:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    s_add_u32 s4, s16, s18
+; GFX8-NEXT:    s_addc_u32 s5, s17, s19
+; GFX8-NEXT:    v_mov_b32_e32 v4, s4
+; GFX8-NEXT:    s_cselect_b32 s6, 1, 0
+; GFX8-NEXT:    v_mov_b32_e32 v5, s5
+; GFX8-NEXT:    flat_store_dwordx2 v[0:1], v[4:5]
+; GFX8-NEXT:    v_mov_b32_e32 v0, s6
+; GFX8-NEXT:    flat_store_dword v[2:3], v0
+; GFX8-NEXT:    s_waitcnt vmcnt(0)
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: s_uaddo_uadde:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_add_u32 s4, s16, s18
+; GFX10-NEXT:    s_addc_u32 s5, s17, s19
+; GFX10-NEXT:    s_cselect_b32 s6, 1, 0
+; GFX10-NEXT:    v_mov_b32_e32 v4, s4
+; GFX10-NEXT:    v_mov_b32_e32 v5, s5
+; GFX10-NEXT:    v_mov_b32_e32 v6, s6
+; GFX10-NEXT:    global_store_dwordx2 v[0:1], v[4:5], off
+; GFX10-NEXT:    global_store_dword v[2:3], v6, off
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_uaddo_uadde:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    s_add_u32 s0, s0, s2
+; GFX11-NEXT:    s_addc_u32 s1, s1, s3
+; GFX11-NEXT:    s_cselect_b32 s2, 1, 0
+; GFX11-NEXT:    v_dual_mov_b32 v5, s1 :: v_dual_mov_b32 v4, s0
+; GFX11-NEXT:    v_mov_b32_e32 v6, s2
+; GFX11-NEXT:    global_store_b64 v[0:1], v[4:5], off
+; GFX11-NEXT:    global_store_b32 v[2:3], v6, off
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: s_uaddo_uadde:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_add_co_u32 s0, s0, s2
+; GFX12-NEXT:    s_add_co_ci_u32 s1, s1, s3
+; GFX12-NEXT:    s_cselect_b32 s2, 1, 0
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    v_dual_mov_b32 v5, s1 :: v_dual_mov_b32 v4, s0
+; GFX12-NEXT:    v_mov_b32_e32 v6, s2
+; GFX12-NEXT:    global_store_b64 v[0:1], v[4:5], off
+; GFX12-NEXT:    global_store_b32 v[2:3], v6, off
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %uaddo = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+  %add = extractvalue {i64, i1} %uaddo, 0
+  %of = extractvalue {i64, i1} %uaddo, 1
+  %of32 = select i1 %of, i32 1, i32 0
+  store i64 %add, ptr addrspace(1) %res
+  store i32 %of32, ptr addrspace(1) %carry
+  ret void
+}
+
+define void @v_uaddo_uadde(i64 %a, i64 %b, ptr addrspace(1) %res, ptr addrspace(1) %carry) {
+; GFX7-LABEL: v_uaddo_uadde:
+; GFX7:       ; %bb.0:
+; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GFX7-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX7-NEXT:    s_mov_b32 s6, 0
+; GFX7-NEXT:    s_mov_b32 s7, 0xf000
+; GFX7-NEXT:    s_mov_b64 s[4:5], 0
+; GFX7-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX7-NEXT:    buffer_store_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
+; GFX7-NEXT:    buffer_store_dword v2, v[6:7], s[4:7], 0 addr64
+; GFX7-NEXT:    s_waitcnt vmcnt(0)
+; GFX7-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddo_uadde:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT:    global_store_dwordx2 v[4:5], v[0:1], off
+; GFX9-NEXT:    global_store_dword v[6:7], v2, off
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddo_uadde:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
+; GFX8-NEXT:    flat_store_dword v[6:7], v2
+; GFX8-NEXT:    s_waitcnt vmcnt(0)
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddo_uadde:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX10-NEXT:    global_store_dwordx2 v[4:5], v[0:1], off
+; GFX10-NEXT:    global_store_dword v[6:7], v2, off
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_uaddo_uadde:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX11-NEXT:    global_store_b64 v[4:5], v[0:1], off
+; GFX11-NEXT:    global_store_b32 v[6:7], v2, off
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_uaddo_uadde:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX12-NEXT:    s_wait_alu 0xfffd
+; GFX12-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX12-NEXT:    s_wait_alu 0xfffd
+; GFX12-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX12-NEXT:    global_store_b64 v[4:5], v[0:1], off
+; GFX12-NEXT:    global_store_b32 v[6:7], v2, off
+; GFX12-NEXT:    s_setpc_b64 s[30:31]
+  %uaddo = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+  %add = extractvalue {i64, i1} %uaddo, 0
+  %of = extractvalue {i64, i1} %uaddo, 1
+  %of32 = select i1 %of, i32 1, i32 0
+  store i64 %add, ptr addrspace(1) %res
+  store i32 %of32, ptr addrspace(1) %carry
+  ret void
+}
+
+declare {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
