; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -amdgpu-scalar-ir-passes=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN %s

define float @select_undef_lhs(float %val, i1 %cond) {
; GCN-LABEL: select_undef_lhs:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
  %sel = select i1 %cond, float poison, float %val
  ret float %sel
}

define float @select_undef_rhs(float %val, i1 %cond) {
; GCN-LABEL: select_undef_rhs:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
  %sel = select i1 %cond, float %val, float poison
  ret float %sel
}

define void @select_undef_n1(ptr addrspace(1) %a, i32 %c) {
; GCN-LABEL: select_undef_n1:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v2, 1.0
; GCN-NEXT: global_store_dword v[0:1], v2, off
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
  %cc = icmp eq i32 %c, 0
  %sel = select i1 %cc, float 1.000000e+00, float poison
  store float %sel, ptr addrspace(1) %a
  ret void
}

define void @select_undef_n2(ptr addrspace(1) %a, i32 %c) {
; GCN-LABEL: select_undef_n2:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v2, 1.0
; GCN-NEXT: global_store_dword v[0:1], v2, off
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
  %cc = icmp eq i32 %c, 0
  %sel = select i1 %cc, float poison, float 1.000000e+00
  store float %sel, ptr addrspace(1) %a
  ret void
}

declare float @llvm.amdgcn.rcp.f32(float)

; Make sure the vector undef isn't lowered into 0s.
define amdgpu_kernel void @undef_v6f32(ptr addrspace(3) %ptr, i1 %cond) {
; GCN-LABEL: undef_v6f32:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s0, s[8:9], 0x4
; GCN-NEXT: ; implicit-def: $vgpr4
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s0, 0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], -1
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
; GCN-NEXT: ; implicit-def: $vgpr0
; GCN-NEXT: .LBB4_1: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: ds_read_b128 v[6:9], v0
; GCN-NEXT: ds_read_b64 v[10:11], v0
; GCN-NEXT: s_and_b64 vcc, exec, s[0:1]
; GCN-NEXT: s_waitcnt lgkmcnt(1)
; GCN-NEXT: v_add_f32_e32 v3, v9, v3
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_add_f32_e32 v5, v11, v5
; GCN-NEXT: v_add_f32_e32 v4, v10, v4
; GCN-NEXT: v_add_f32_e32 v2, v8, v2
; GCN-NEXT: v_add_f32_e32 v1, v7, v1
; GCN-NEXT: v_add_f32_e32 v0, v6, v0
; GCN-NEXT: s_cbranch_vccnz .LBB4_1
; GCN-NEXT: ; %bb.2: ; %ret
; GCN-NEXT: ds_write_b64 v0, v[4:5]
; GCN-NEXT: ds_write_b128 v0, v[0:3]
; GCN-NEXT: s_endpgm
entry:
  br label %loop
loop:
  %phi = phi <6 x float> [ poison, %entry ], [ %add, %loop ]
  %load = load volatile <6 x float>, ptr addrspace(3) poison
  %add = fadd <6 x float> %load, %phi
  br i1 %cond, label %loop, label %ret
ret:
  store volatile <6 x float> %add, ptr addrspace(3) poison
  ret void
}

define amdgpu_kernel void @undef_v6i32(ptr addrspace(3) %ptr, i1 %cond) {
; GCN-LABEL: undef_v6i32:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s0, s[8:9], 0x4
; GCN-NEXT: ; implicit-def: $vgpr4
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s0, 0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], -1
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
; GCN-NEXT: ; implicit-def: $vgpr0
; GCN-NEXT: .LBB5_1: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: ds_read_b128 v[6:9], v0
; GCN-NEXT: ds_read_b64 v[10:11], v0
; GCN-NEXT: s_and_b64 vcc, exec, s[0:1]
; GCN-NEXT: s_waitcnt lgkmcnt(1)
; GCN-NEXT: v_add_u32_e32 v3, v9, v3
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_add_u32_e32 v5, v11, v5
; GCN-NEXT: v_add_u32_e32 v4, v10, v4
; GCN-NEXT: v_add_u32_e32 v2, v8, v2
; GCN-NEXT: v_add_u32_e32 v1, v7, v1
; GCN-NEXT: v_add_u32_e32 v0, v6, v0
; GCN-NEXT: s_cbranch_vccnz .LBB5_1
; GCN-NEXT: ; %bb.2: ; %ret
; GCN-NEXT: ds_write_b64 v0, v[4:5]
; GCN-NEXT: ds_write_b128 v0, v[0:3]
; GCN-NEXT: s_endpgm
entry:
  br label %loop
loop:
  %phi = phi <6 x i32> [ poison, %entry ], [ %add, %loop ]
  %load = load volatile <6 x i32>, ptr addrspace(3) poison
  %add = add <6 x i32> %load, %phi
  br i1 %cond, label %loop, label %ret
ret:
  store volatile <6 x i32> %add, ptr addrspace(3) poison
  ret void
}

; Make sure the vector undef isn't lowered into 0s.
define amdgpu_kernel void @undef_v5f32(ptr addrspace(3) %ptr, i1 %cond) {
; GCN-LABEL: undef_v5f32:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s0, s[8:9], 0x4
; GCN-NEXT: ; implicit-def: $vgpr4
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s0, 0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], -1
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
; GCN-NEXT: ; implicit-def: $vgpr0
; GCN-NEXT: .LBB6_1: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: ds_read_b128 v[5:8], v0
; GCN-NEXT: ds_read_b32 v9, v0
; GCN-NEXT: s_and_b64 vcc, exec, s[0:1]
; GCN-NEXT: s_waitcnt lgkmcnt(1)
; GCN-NEXT: v_add_f32_e32 v3, v8, v3
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_add_f32_e32 v4, v9, v4
; GCN-NEXT: v_add_f32_e32 v2, v7, v2
; GCN-NEXT: v_add_f32_e32 v1, v6, v1
; GCN-NEXT: v_add_f32_e32 v0, v5, v0
; GCN-NEXT: s_cbranch_vccnz .LBB6_1
; GCN-NEXT: ; %bb.2: ; %ret
; GCN-NEXT: ds_write_b32 v0, v4
; GCN-NEXT: ds_write_b128 v0, v[0:3]
; GCN-NEXT: s_endpgm
entry:
  br label %loop
loop:
  %phi = phi <5 x float> [ poison, %entry ], [ %add, %loop ]
  %load = load volatile <5 x float>, ptr addrspace(3) poison
  %add = fadd <5 x float> %load, %phi
  br i1 %cond, label %loop, label %ret
ret:
  store volatile <5 x float> %add, ptr addrspace(3) poison
  ret void
}

define amdgpu_kernel void @undef_v5i32(ptr addrspace(3) %ptr, i1 %cond) {
; GCN-LABEL: undef_v5i32:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s0, s[8:9], 0x4
; GCN-NEXT: ; implicit-def: $vgpr4
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s0, 0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], -1
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
; GCN-NEXT: ; implicit-def: $vgpr0
; GCN-NEXT: .LBB7_1: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: ds_read_b128 v[5:8], v0
; GCN-NEXT: ds_read_b32 v9, v0
; GCN-NEXT: s_and_b64 vcc, exec, s[0:1]
; GCN-NEXT: s_waitcnt lgkmcnt(1)
; GCN-NEXT: v_add_u32_e32 v3, v8, v3
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_add_u32_e32 v4, v9, v4
; GCN-NEXT: v_add_u32_e32 v2, v7, v2
; GCN-NEXT: v_add_u32_e32 v1, v6, v1
; GCN-NEXT: v_add_u32_e32 v0, v5, v0
; GCN-NEXT: s_cbranch_vccnz .LBB7_1
; GCN-NEXT: ; %bb.2: ; %ret
; GCN-NEXT: ds_write_b32 v0, v4
; GCN-NEXT: ds_write_b128 v0, v[0:3]
; GCN-NEXT: s_endpgm
entry:
  br label %loop
loop:
  %phi = phi <5 x i32> [ poison, %entry ], [ %add, %loop ]
  %load = load volatile <5 x i32>, ptr addrspace(3) poison
  %add = add <5 x i32> %load, %phi
  br i1 %cond, label %loop, label %ret
ret:
  store volatile <5 x i32> %add, ptr addrspace(3) poison
  ret void
}

; Make sure the vector undef isn't lowered into 0s.
define amdgpu_kernel void @undef_v3f64(ptr addrspace(3) %ptr, i1 %cond) {
; GCN-LABEL: undef_v3f64:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx2 s[2:3], s[8:9], 0x0
; GCN-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s3, 0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], -1
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GCN-NEXT: v_mov_b32_e32 v6, s2
; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
; GCN-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN-NEXT: .LBB8_1: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: ds_read_b128 v[7:10], v6
; GCN-NEXT: ds_read_b64 v[11:12], v6 offset:16
; GCN-NEXT: s_and_b64 vcc, exec, s[0:1]
; GCN-NEXT: s_waitcnt lgkmcnt(1)
; GCN-NEXT: v_add_f64 v[2:3], v[9:10], v[2:3]
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_add_f64 v[4:5], v[11:12], v[4:5]
; GCN-NEXT: v_add_f64 v[0:1], v[7:8], v[0:1]
; GCN-NEXT: s_cbranch_vccnz .LBB8_1
; GCN-NEXT: ; %bb.2: ; %ret
; GCN-NEXT: v_mov_b32_e32 v6, s2
; GCN-NEXT: ds_write_b64 v6, v[4:5] offset:16
; GCN-NEXT: ds_write_b128 v6, v[0:3]
; GCN-NEXT: s_endpgm
entry:
  br label %loop
loop:
  %phi = phi <3 x double> [ poison, %entry ], [ %add, %loop ]
  %load = load volatile <3 x double>, ptr addrspace(3) %ptr
  %add = fadd <3 x double> %load, %phi
  br i1 %cond, label %loop, label %ret
ret:
  store volatile <3 x double> %add, ptr addrspace(3) %ptr
  ret void
}

define amdgpu_kernel void @undef_v3i64(ptr addrspace(3) %ptr, i1 %cond) {
; GCN-LABEL: undef_v3i64:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0
; GCN-NEXT: ; implicit-def: $vgpr4_vgpr5
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s5, 0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], -1
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GCN-NEXT: v_mov_b32_e32 v6, s4
; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
; GCN-NEXT: ; implicit-def: $vgpr0_vgpr1
; GCN-NEXT: .LBB9_1: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: ds_read_b128 v[7:10], v6
; GCN-NEXT: ds_read_b64 v[11:12], v6 offset:16
; GCN-NEXT: s_waitcnt lgkmcnt(1)
; GCN-NEXT: v_add_co_u32_e64 v0, s[2:3], v7, v0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_add_co_u32_e32 v4, vcc, v11, v4
; GCN-NEXT: v_addc_co_u32_e32 v5, vcc, v12, v5, vcc
; GCN-NEXT: v_add_co_u32_e32 v2, vcc, v9, v2
; GCN-NEXT: v_addc_co_u32_e32 v3, vcc, v10, v3, vcc
; GCN-NEXT: s_and_b64 vcc, exec, s[0:1]
; GCN-NEXT: v_addc_co_u32_e64 v1, s[2:3], v8, v1, s[2:3]
; GCN-NEXT: s_cbranch_vccnz .LBB9_1
; GCN-NEXT: ; %bb.2: ; %ret
; GCN-NEXT: v_mov_b32_e32 v6, s4
; GCN-NEXT: ds_write_b64 v6, v[4:5] offset:16
; GCN-NEXT: ds_write_b128 v6, v[0:3]
; GCN-NEXT: s_endpgm
entry:
  br label %loop
loop:
  %phi = phi <3 x i64> [ poison, %entry ], [ %add, %loop ]
  %load = load volatile <3 x i64>, ptr addrspace(3) %ptr
  %add = add <3 x i64> %load, %phi
  br i1 %cond, label %loop, label %ret
ret:
  store volatile <3 x i64> %add, ptr addrspace(3) %ptr
  ret void
}

; Make sure the vector undef isn't lowered into 0s.
define amdgpu_kernel void @undef_v4f16(ptr addrspace(3) %ptr, i1 %cond) {
; GCN-LABEL: undef_v4f16:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx2 s[2:3], s[8:9], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s3, 0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], -1
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GCN-NEXT: v_mov_b32_e32 v2, s2
; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
; GCN-NEXT: ; implicit-def: $vgpr0
; GCN-NEXT: .LBB10_1: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: ds_read_b64 v[3:4], v2
; GCN-NEXT: s_and_b64 vcc, exec, s[0:1]
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_pk_add_f16 v1, v4, v1
; GCN-NEXT: v_pk_add_f16 v0, v3, v0
; GCN-NEXT: s_cbranch_vccnz .LBB10_1
; GCN-NEXT: ; %bb.2: ; %ret
; GCN-NEXT: v_mov_b32_e32 v2, s2
; GCN-NEXT: ds_write_b64 v2, v[0:1]
; GCN-NEXT: s_endpgm
entry:
  br label %loop
loop:
  %phi = phi <4 x half> [ poison, %entry ], [ %add, %loop ]
  %load = load volatile <4 x half>, ptr addrspace(3) %ptr
  %add = fadd <4 x half> %load, %phi
  br i1 %cond, label %loop, label %ret
ret:
  store volatile <4 x half> %add, ptr addrspace(3) %ptr
  ret void
}

define amdgpu_kernel void @undef_v4i16(ptr addrspace(3) %ptr, i1 %cond) {
; GCN-LABEL: undef_v4i16:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx2 s[2:3], s[8:9], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s3, 0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], -1
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GCN-NEXT: v_mov_b32_e32 v2, s2
; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
; GCN-NEXT: ; implicit-def: $vgpr0
; GCN-NEXT: .LBB11_1: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: ds_read_b64 v[3:4], v2
; GCN-NEXT: s_and_b64 vcc, exec, s[0:1]
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_pk_add_u16 v1, v4, v1
; GCN-NEXT: v_pk_add_u16 v0, v3, v0
; GCN-NEXT: s_cbranch_vccnz .LBB11_1
; GCN-NEXT: ; %bb.2: ; %ret
; GCN-NEXT: v_mov_b32_e32 v2, s2
; GCN-NEXT: ds_write_b64 v2, v[0:1]
; GCN-NEXT: s_endpgm
entry:
  br label %loop
loop:
  %phi = phi <4 x i16> [ poison, %entry ], [ %add, %loop ]
  %load = load volatile <4 x i16>, ptr addrspace(3) %ptr
  %add = add <4 x i16> %load, %phi
  br i1 %cond, label %loop, label %ret
ret:
  store volatile <4 x i16> %add, ptr addrspace(3) %ptr
  ret void
}

; Make sure the vector undef isn't lowered into 0s.
define amdgpu_kernel void @undef_v2f16(ptr addrspace(3) %ptr, i1 %cond) {
; GCN-LABEL: undef_v2f16:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx2 s[2:3], s[8:9], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s3, 0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], -1
; GCN-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
; GCN-NEXT: v_mov_b32_e32 v0, s2
; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v1
; GCN-NEXT: ; implicit-def: $vgpr1
; GCN-NEXT: .LBB12_1: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: ds_read_b32 v2, v0
; GCN-NEXT: s_and_b64 vcc, exec, s[0:1]
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_pk_add_f16 v1, v2, v1
; GCN-NEXT: s_cbranch_vccnz .LBB12_1
; GCN-NEXT: ; %bb.2: ; %ret
; GCN-NEXT: v_mov_b32_e32 v0, s2
; GCN-NEXT: ds_write_b32 v0, v1
; GCN-NEXT: s_endpgm
entry:
  br label %loop
loop:
  %phi = phi <2 x half> [ poison, %entry ], [ %add, %loop ]
  %load = load volatile <2 x half>, ptr addrspace(3) %ptr
  %add = fadd <2 x half> %load, %phi
  br i1 %cond, label %loop, label %ret
ret:
  store volatile <2 x half> %add, ptr addrspace(3) %ptr
  ret void
}

define amdgpu_kernel void @undef_v2i16(ptr addrspace(3) %ptr, i1 %cond) {
; GCN-LABEL: undef_v2i16:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx2 s[2:3], s[8:9], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s3, 0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], -1
; GCN-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
; GCN-NEXT: v_mov_b32_e32 v0, s2
; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v1
; GCN-NEXT: ; implicit-def: $vgpr1
; GCN-NEXT: .LBB13_1: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: ds_read_b32 v2, v0
; GCN-NEXT: s_and_b64 vcc, exec, s[0:1]
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_pk_add_u16 v1, v2, v1
; GCN-NEXT: s_cbranch_vccnz .LBB13_1
; GCN-NEXT: ; %bb.2: ; %ret
; GCN-NEXT: v_mov_b32_e32 v0, s2
; GCN-NEXT: ds_write_b32 v0, v1
; GCN-NEXT: s_endpgm
entry:
  br label %loop
loop:
  %phi = phi <2 x i16> [ poison, %entry ], [ %add, %loop ]
  %load = load volatile <2 x i16>, ptr addrspace(3) %ptr
  %add = add <2 x i16> %load, %phi
  br i1 %cond, label %loop, label %ret
ret:
  store volatile <2 x i16> %add, ptr addrspace(3) %ptr
  ret void
}

; We were expanding undef vectors into zero vectors. Optimizations
; would then see we used no elements of the vector, and reform the
; undef vector resulting in a combiner loop.
define void @inf_loop_undef_vector(<6 x float> %arg, float %arg1, i64 %arg2) {
; GCN-LABEL: inf_loop_undef_vector:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v6, v7, v[0:1]
; GCN-NEXT: v_mul_lo_u32 v2, v6, v8
; GCN-NEXT: v_mul_lo_u32 v3, v3, v7
; GCN-NEXT: v_add3_u32 v1, v3, v1, v2
; GCN-NEXT: global_store_dwordx2 v[0:1], v[0:1], off
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
  %i = insertelement <6 x float> %arg, float %arg1, i64 2
  %i3 = bitcast <6 x float> %i to <3 x i64>
  %i4 = extractelement <3 x i64> %i3, i64 0
  %i5 = extractelement <3 x i64> %i3, i64 1
  %i6 = mul i64 %i5, %arg2
  %i7 = add i64 %i6, %i4
  store volatile i64 %i7, ptr addrspace(1) poison, align 4
  ret void
}
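
; The same poison phi pattern for bfloat scalar and vector types. As
; above, make sure the poison inputs aren't lowered into 0s.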
define amdgpu_kernel void @undef_bf16(ptr addrspace(3) %ptr, i1 %cond) {
; GCN-LABEL: undef_bf16:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s0, s[8:9], 0x4
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s0, 0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], -1
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
; GCN-NEXT: ; implicit-def: $vgpr0
; GCN-NEXT: .LBB15_1: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: ds_read_u16 v1, v0
; GCN-NEXT: s_and_b64 vcc, exec, s[0:1]
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_add_u32_e32 v0, v1, v0
; GCN-NEXT: s_cbranch_vccnz .LBB15_1
; GCN-NEXT: ; %bb.2: ; %ret
; GCN-NEXT: ds_write_b16 v0, v0
; GCN-NEXT: s_endpgm
entry:
  br label %loop
loop:
  %phi = phi bfloat [ poison, %entry ], [ %add, %loop ]
  %load = load volatile bfloat, ptr addrspace(3) poison
  %bc.0 = bitcast bfloat %load to i16
  %bc.1 = bitcast bfloat %phi to i16
  %add.i = add i16 %bc.0, %bc.1
  %add = bitcast i16 %add.i to bfloat
  br i1 %cond, label %loop, label %ret
ret:
  store volatile bfloat %add, ptr addrspace(3) poison
  ret void
}

define amdgpu_kernel void @undef_v2bf16(ptr addrspace(3) %ptr, i1 %cond) {
; GCN-LABEL: undef_v2bf16:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s0, s[8:9], 0x4
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s0, 0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], -1
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
; GCN-NEXT: ; implicit-def: $vgpr0
; GCN-NEXT: .LBB16_1: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: ds_read_b32 v1, v0
; GCN-NEXT: s_and_b64 vcc, exec, s[0:1]
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_pk_add_u16 v0, v1, v0
; GCN-NEXT: s_cbranch_vccnz .LBB16_1
; GCN-NEXT: ; %bb.2: ; %ret
; GCN-NEXT: ds_write_b32 v0, v0
; GCN-NEXT: s_endpgm
entry:
  br label %loop
loop:
  %phi = phi <2 x bfloat> [ poison, %entry ], [ %add, %loop ]
  %load = load volatile <2 x bfloat>, ptr addrspace(3) poison
  %bc.0 = bitcast <2 x bfloat> %load to <2 x i16>
  %bc.1 = bitcast <2 x bfloat> %phi to <2 x i16>
  %add.i = add <2 x i16> %bc.0, %bc.1
  %add = bitcast <2 x i16> %add.i to <2 x bfloat>
  br i1 %cond, label %loop, label %ret
ret:
  store volatile <2 x bfloat> %add, ptr addrspace(3) poison
  ret void
}

define amdgpu_kernel void @undef_v3bf16(ptr addrspace(3) %ptr, i1 %cond) {
; GCN-LABEL: undef_v3bf16:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s0, s[8:9], 0x4
; GCN-NEXT: ; implicit-def: $vgpr1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s0, 0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], -1
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
; GCN-NEXT: ; implicit-def: $vgpr0
; GCN-NEXT: .LBB17_1: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: ds_read_b32 v2, v0
; GCN-NEXT: ds_read_u16 v3, v0
; GCN-NEXT: s_and_b64 vcc, exec, s[0:1]
; GCN-NEXT: s_waitcnt lgkmcnt(1)
; GCN-NEXT: v_pk_add_u16 v0, v2, v0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_pk_add_u16 v1, v3, v1
; GCN-NEXT: s_cbranch_vccnz .LBB17_1
; GCN-NEXT: ; %bb.2: ; %ret
; GCN-NEXT: ds_write_b16 v0, v1
; GCN-NEXT: ds_write_b32 v0, v0
; GCN-NEXT: s_endpgm
entry:
  br label %loop
loop:
  %phi = phi <3 x bfloat> [ poison, %entry ], [ %add, %loop ]
  %load = load volatile <3 x bfloat>, ptr addrspace(3) poison
  %bc.0 = bitcast <3 x bfloat> %load to <3 x i16>
  %bc.1 = bitcast <3 x bfloat> %phi to <3 x i16>
  %add.i = add <3 x i16> %bc.0, %bc.1
  %add = bitcast <3 x i16> %add.i to <3 x bfloat>
  br i1 %cond, label %loop, label %ret
ret:
  store volatile <3 x bfloat> %add, ptr addrspace(3) poison
  ret void
}

define amdgpu_kernel void @undef_v4bf16(ptr addrspace(3) %ptr, i1 %cond) {
; GCN-LABEL: undef_v4bf16:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s0, s[8:9], 0x4
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s0, 0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], -1
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
; GCN-NEXT: ; implicit-def: $vgpr0
; GCN-NEXT: .LBB18_1: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: ds_read_b64 v[2:3], v0
; GCN-NEXT: s_and_b64 vcc, exec, s[0:1]
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_pk_add_u16 v1, v3, v1
; GCN-NEXT: v_pk_add_u16 v0, v2, v0
; GCN-NEXT: s_cbranch_vccnz .LBB18_1
; GCN-NEXT: ; %bb.2: ; %ret
; GCN-NEXT: ds_write_b64 v0, v[0:1]
; GCN-NEXT: s_endpgm
entry:
  br label %loop
loop:
  %phi = phi <4 x bfloat> [ poison, %entry ], [ %add, %loop ]
  %load = load volatile <4 x bfloat>, ptr addrspace(3) poison
  %bc.0 = bitcast <4 x bfloat> %load to <4 x i16>
  %bc.1 = bitcast <4 x bfloat> %phi to <4 x i16>
  %add.i = add <4 x i16> %bc.0, %bc.1
  %add = bitcast <4 x i16> %add.i to <4 x bfloat>
  br i1 %cond, label %loop, label %ret
ret:
  store volatile <4 x bfloat> %add, ptr addrspace(3) poison
  ret void
}

define amdgpu_kernel void @undef_v6bf16(ptr addrspace(3) %ptr, i1 %cond) {
; GCN-LABEL: undef_v6bf16:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s0, s[8:9], 0x4
; GCN-NEXT: ; implicit-def: $vgpr2
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s0, 0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], -1
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
; GCN-NEXT: ; implicit-def: $vgpr0
; GCN-NEXT: .LBB19_1: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: ds_read_b64 v[3:4], v0
; GCN-NEXT: ds_read_b32 v5, v0
; GCN-NEXT: s_and_b64 vcc, exec, s[0:1]
; GCN-NEXT: s_waitcnt lgkmcnt(1)
; GCN-NEXT: v_pk_add_u16 v1, v4, v1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_pk_add_u16 v2, v5, v2
; GCN-NEXT: v_pk_add_u16 v0, v3, v0
; GCN-NEXT: s_cbranch_vccnz .LBB19_1
; GCN-NEXT: ; %bb.2: ; %ret
; GCN-NEXT: ds_write_b32 v0, v2
; GCN-NEXT: ds_write_b64 v0, v[0:1]
; GCN-NEXT: s_endpgm
entry:
  br label %loop
loop:
  %phi = phi <6 x bfloat> [ poison, %entry ], [ %add, %loop ]
  %load = load volatile <6 x bfloat>, ptr addrspace(3) poison
  %bc.0 = bitcast <6 x bfloat> %load to <6 x i16>
  %bc.1 = bitcast <6 x bfloat> %phi to <6 x i16>
  %add.i = add <6 x i16> %bc.0, %bc.1
  %add = bitcast <6 x i16> %add.i to <6 x bfloat>
  br i1 %cond, label %loop, label %ret
ret:
  store volatile <6 x bfloat> %add, ptr addrspace(3) poison
  ret void
}

define amdgpu_kernel void @undef_v8bf16(ptr addrspace(3) %ptr, i1 %cond) {
; GCN-LABEL: undef_v8bf16:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s0, s[8:9], 0x4
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s0, 0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], -1
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
; GCN-NEXT: ; implicit-def: $vgpr0
; GCN-NEXT: .LBB20_1: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: ds_read_b128 v[4:7], v0
; GCN-NEXT: s_and_b64 vcc, exec, s[0:1]
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_pk_add_u16 v3, v7, v3
; GCN-NEXT: v_pk_add_u16 v2, v6, v2
; GCN-NEXT: v_pk_add_u16 v1, v5, v1
; GCN-NEXT: v_pk_add_u16 v0, v4, v0
; GCN-NEXT: s_cbranch_vccnz .LBB20_1
; GCN-NEXT: ; %bb.2: ; %ret
; GCN-NEXT: ds_write_b128 v0, v[0:3]
; GCN-NEXT: s_endpgm
entry:
  br label %loop
loop:
  %phi = phi <8 x bfloat> [ poison, %entry ], [ %add, %loop ]
  %load = load volatile <8 x bfloat>, ptr addrspace(3) poison
  %bc.0 = bitcast <8 x bfloat> %load to <8 x i16>
  %bc.1 = bitcast <8 x bfloat> %phi to <8 x i16>
  %add.i = add <8 x i16> %bc.0, %bc.1
  %add = bitcast <8 x i16> %add.i to <8 x bfloat>
  br i1 %cond, label %loop, label %ret
ret:
  store volatile <8 x bfloat> %add, ptr addrspace(3) poison
  ret void
}

define amdgpu_kernel void @undef_v16bf16(ptr addrspace(3) %ptr, i1 %cond) {
; GCN-LABEL: undef_v16bf16:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s0, s[8:9], 0x4
; GCN-NEXT: ; implicit-def: $vgpr4
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s0, 0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], -1
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
; GCN-NEXT: ; implicit-def: $vgpr0
; GCN-NEXT: .LBB21_1: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: ds_read_b128 v[8:11], v0
; GCN-NEXT: s_and_b64 vcc, exec, s[0:1]
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_pk_add_u16 v7, v11, v7
; GCN-NEXT: v_pk_add_u16 v6, v10, v6
; GCN-NEXT: v_pk_add_u16 v5, v9, v5
; GCN-NEXT: v_pk_add_u16 v4, v8, v4
; GCN-NEXT: v_pk_add_u16 v3, v11, v3
; GCN-NEXT: v_pk_add_u16 v2, v10, v2
; GCN-NEXT: v_pk_add_u16 v1, v9, v1
; GCN-NEXT: v_pk_add_u16 v0, v8, v0
; GCN-NEXT: s_cbranch_vccnz .LBB21_1
; GCN-NEXT: ; %bb.2: ; %ret
; GCN-NEXT: ds_write_b128 v0, v[4:7]
; GCN-NEXT: ds_write_b128 v0, v[0:3]
; GCN-NEXT: s_endpgm
entry:
  br label %loop
loop:
  %phi = phi <16 x bfloat> [ poison, %entry ], [ %add, %loop ]
  %load = load volatile <16 x bfloat>, ptr addrspace(3) poison
  %bc.0 = bitcast <16 x bfloat> %load to <16 x i16>
  %bc.1 = bitcast <16 x bfloat> %phi to <16 x i16>
  %add.i = add <16 x i16> %bc.0, %bc.1
  %add = bitcast <16 x i16> %add.i to <16 x bfloat>
  br i1 %cond, label %loop, label %ret
ret:
  store volatile <16 x bfloat> %add, ptr addrspace(3) poison
  ret void
}

define amdgpu_kernel void @undef_v32bf16(ptr addrspace(3) %ptr, i1 %cond) {
; GCN-LABEL: undef_v32bf16:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s0, s[8:9], 0x4
; GCN-NEXT: ; implicit-def: $vgpr4
; GCN-NEXT: ; implicit-def: $vgpr8
; GCN-NEXT: ; implicit-def: $vgpr12
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_bitcmp1_b32 s0, 0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_xor_b64 s[0:1], s[0:1], -1
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GCN-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
; GCN-NEXT: ; implicit-def: $vgpr0
; GCN-NEXT: .LBB22_1: ; %loop
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: ds_read_b128 v[16:19], v0
; GCN-NEXT: s_and_b64 vcc, exec, s[0:1]
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_pk_add_u16 v15, v19, v15
; GCN-NEXT: v_pk_add_u16 v14, v18, v14
; GCN-NEXT: v_pk_add_u16 v13, v17, v13
; GCN-NEXT: v_pk_add_u16 v12, v16, v12
; GCN-NEXT: v_pk_add_u16 v11, v19, v11
; GCN-NEXT: v_pk_add_u16 v10, v18, v10
; GCN-NEXT: v_pk_add_u16 v9, v17, v9
; GCN-NEXT: v_pk_add_u16 v8, v16, v8
; GCN-NEXT: v_pk_add_u16 v7, v19, v7
; GCN-NEXT: v_pk_add_u16 v6, v18, v6
; GCN-NEXT: v_pk_add_u16 v5, v17, v5
; GCN-NEXT: v_pk_add_u16 v4, v16, v4
; GCN-NEXT: v_pk_add_u16 v3, v19, v3
; GCN-NEXT: v_pk_add_u16 v2, v18, v2
; GCN-NEXT: v_pk_add_u16 v1, v17, v1
; GCN-NEXT: v_pk_add_u16 v0, v16, v0
; GCN-NEXT: s_cbranch_vccnz .LBB22_1
; GCN-NEXT: ; %bb.2: ; %ret
; GCN-NEXT: ds_write_b128 v0, v[12:15]
; GCN-NEXT: ds_write_b128 v0, v[8:11]
; GCN-NEXT: ds_write_b128 v0, v[4:7]
; GCN-NEXT: ds_write_b128 v0, v[0:3]
; GCN-NEXT: s_endpgm
entry:
  br label %loop
loop:
  %phi = phi <32 x bfloat> [ poison, %entry ], [ %add, %loop ]
  %load = load volatile <32 x bfloat>, ptr addrspace(3) poison
  %bc.0 = bitcast <32 x bfloat> %load to <32 x i16>
  %bc.1 = bitcast <32 x bfloat> %phi to <32 x i16>
  %add.i = add <32 x i16> %bc.0, %bc.1
  %add = bitcast <32 x i16> %add.i to <32 x bfloat>
  br i1 %cond, label %loop, label %ret
ret:
  store volatile <32 x bfloat> %add, ptr addrspace(3) poison
  ret void
}
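
; The select operand built from a partially poison vector should be
; frozen rather than folded away; the compare and both selects below
; should still be emitted.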
define i64 @poison_should_freeze(i1 %cond1, i32 %val, i16 %val2, i64 %a, i64 %b) {
; GCN-LABEL: poison_should_freeze:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_and_b32_e32 v0, 1, v0
; GCN-NEXT: v_mov_b32_e32 v7, 0x5040100
; GCN-NEXT: v_perm_b32 v2, v2, s4, v7
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GCN-NEXT: v_cndmask_b32_e32 v0, v5, v3, vcc
; GCN-NEXT: v_cndmask_b32_e32 v1, v6, v4, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
  %poisonv = insertelement <2 x i16> poison, i16 %val2, i32 1
  %poison = bitcast <2 x i16> %poisonv to i32
  %cond2 = select i1 %cond1, i32 %poison, i32 %val
  %cmp = icmp eq i32 %cond2, 0
  %select = select i1 %cmp, i64 %a, i64 %b
  ret i64 %select
}