Diffstat (limited to 'llvm/test')
25 files changed, 3436 insertions, 2768 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/fmax_legacy.f16.ll b/llvm/test/CodeGen/AMDGPU/fmax_legacy.f16.ll index ed48999..bd28f72 100644 --- a/llvm/test/CodeGen/AMDGPU/fmax_legacy.f16.ll +++ b/llvm/test/CodeGen/AMDGPU/fmax_legacy.f16.ll @@ -1,734 +1,759 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-SAFE %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-NNAN %s +; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s -; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI-SAFE %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-NNAN %s +; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI %s -; RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI-SAFE %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefixes=SI-NNAN %s +; RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI %s -; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SAFE-TRUE16 %s -; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SAFE-FAKE16 %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-NNAN,GFX11-NNAN-TRUE16 %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-NNAN,GFX11-NNAN-FAKE16 %s +; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-TRUE16 %s +; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-FAKE16 %s define half @test_fmax_legacy_ugt_f16(half %a, half %b) #0 { -; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_max_f16_e32 v0, v0, v1 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmax_legacy_ugt_f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmax_legacy_ugt_f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v1 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmax_legacy_ugt_f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v1, v0 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmax_legacy_ugt_f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v1 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.l, v1.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v1 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-TRUE16-LABEL: test_fmax_legacy_ugt_f16: -; GFX11-NNAN-TRUE16: ; %bb.0: -; GFX11-NNAN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v1.l -; GFX11-NNAN-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-FAKE16-LABEL: test_fmax_legacy_ugt_f16: -; GFX11-NNAN-FAKE16: ; %bb.0: -; GFX11-NNAN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-FAKE16-NEXT: v_max_f16_e32 v0, v0, v1 -; GFX11-NNAN-FAKE16-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmax_legacy_ugt_f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_max_legacy_f32_e32 v0, v1, v0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.l, v1.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, vcc_lo +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ugt half %a, %b %val = select i1 %cmp, half %a, half %b ret half %val } +define half @test_fmax_legacy_ugt_f16_fast(half %a, half %b) #0 { +; GFX9-LABEL: test_fmax_legacy_ugt_f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_max_f16_e32 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_max_f16_e32 v0, v0, v1 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_max_f32_e32 v0, v0, v1 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_f16_fast: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v1.l +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_f16_fast: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_max_f16_e32 v0, v0, v1 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ugt half %a, %b + %val = select nnan nsz i1 %cmp, half %a, half %b + ret half %val +} + define <2 x half> @test_fmax_legacy_ugt_v2f16(<2 x half> %a, <2 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_v2f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v3, v2 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v2, v0, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_v2f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_max_f16 v0, v0, v1 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmax_legacy_ugt_v2f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v3, v2 -; VI-SAFE-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmax_legacy_ugt_v2f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_max_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v1 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v2 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmax_legacy_ugt_v2f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v2, v0 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v1, v3, v1 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmax_legacy_ugt_v2f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v2 -; SI-NNAN-NEXT: v_max_f32_e32 v1, v1, v3 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_v2f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v1.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.l, v1.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v1.h, v0.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, s0 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_v2f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v2 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v1 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmax_legacy_ugt_v2f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_max_f16 v0, v0, v1 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmax_legacy_ugt_v2f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v3, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v2, v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v2f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v3, v2 +; VI-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1 +; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; VI-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v2f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_max_legacy_f32_e32 v0, v2, v0 +; SI-NEXT: v_max_legacy_f32_e32 v1, v3, v1 +; SI-NEXT: s_setpc_b64 s[30:31] +; 
+; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_v2f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v1.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.l, v1.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v1.h, v0.h, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, s0 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_v2f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v2 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ugt <2 x half> %a, %b %val = select <2 x i1> %cmp, <2 x half> %a, <2 x half> %b ret <2 x half> %val } +define <2 x half> @test_fmax_legacy_ugt_v2f16_fast(<2 x half> %a, <2 x half> %b) #0 { +; GFX9-LABEL: test_fmax_legacy_ugt_v2f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_max_f16 v0, v0, v1 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v2f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_max_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_e32 v0, v0, v1 +; VI-NEXT: v_or_b32_e32 v0, v0, v2 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v2f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_max_f32_e32 v0, v0, v2 +; SI-NEXT: v_max_f32_e32 v1, v1, v3 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmax_legacy_ugt_v2f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_max_f16 v0, v0, v1 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ugt <2 x half> %a, %b + %val = select nnan nsz <2 x i1> %cmp, <2 x half> %a, <2 x half> %b + ret <2 x half> %val +} + define <3 x half> @test_fmax_legacy_ugt_v3f16(<3 x half> %a, <3 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_v3f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v4, v0, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_v3f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_max_f16 v1, v1, v3 -; GFX9-NNAN-NEXT: v_pk_max_f16 v0, v0, v2 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmax_legacy_ugt_v3f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 -; VI-SAFE-NEXT: v_cndmask_b32_sdwa v4, v4, v5, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmax_legacy_ugt_v3f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_max_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v2 -; VI-NNAN-NEXT: v_max_f16_e32 v1, v1, v3 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v4 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmax_legacy_ugt_v3f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v3, v0 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v1, v4, v1 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v2, v5, v2 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmax_legacy_ugt_v3f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v3 -; SI-NNAN-NEXT: v_max_f32_e32 v1, v1, v4 -; SI-NNAN-NEXT: v_max_f32_e32 v2, v2, v5 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_v3f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v2.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.l, v2.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v1.l, v3.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s0 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s1 -; GFX11-SAFE-TRUE16-NEXT: 
s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_v3f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v5, v4 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v3 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmax_legacy_ugt_v3f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_max_f16 v0, v0, v2 -; GFX11-NNAN-NEXT: v_pk_max_f16 v1, v1, v3 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmax_legacy_ugt_v3f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 +; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v4, v0, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v3f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 +; VI-NEXT: v_cndmask_b32_sdwa v4, v4, v5, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 +; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 +; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; VI-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v3f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_max_legacy_f32_e32 v0, v3, v0 +; SI-NEXT: v_max_legacy_f32_e32 v1, v4, v1 +; SI-NEXT: v_max_legacy_f32_e32 v2, v5, v2 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_v3f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v2.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.l, v2.l +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v1.l, v3.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | 
instid1(VALU_DEP_3) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s0 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s1 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_v3f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3) +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v5, v4 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v3 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ugt <3 x half> %a, %b %val = select <3 x i1> %cmp, <3 x half> %a, <3 x half> %b ret <3 x half> %val } +define <3 x half> @test_fmax_legacy_ugt_v3f16_fast(<3 x half> %a, <3 x half> %b) #0 { +; GFX9-LABEL: test_fmax_legacy_ugt_v3f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_max_f16 v3, v3, v3 +; GFX9-NEXT: v_pk_max_f16 v1, v1, v1 +; GFX9-NEXT: v_pk_max_f16 v1, v1, v3 +; GFX9-NEXT: v_pk_max_f16 v0, v0, v2 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v3f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_max_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_e32 v0, v0, v2 +; VI-NEXT: v_max_f16_e32 v1, v1, v3 +; VI-NEXT: v_or_b32_e32 v0, v0, v4 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v3f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_max_f32_e32 v0, v0, v3 +; SI-NEXT: v_max_f32_e32 v1, v1, v4 +; SI-NEXT: v_max_f32_e32 v2, v2, v5 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmax_legacy_ugt_v3f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_max_f16 v3, v3, v3 +; GFX11-NEXT: v_pk_max_f16 v1, v1, v1 +; GFX11-NEXT: v_pk_max_f16 v0, v0, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-NEXT: v_pk_max_f16 v1, v1, v3 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ugt <3 x half> %a, %b + %val = select nnan nsz <3 x i1> %cmp, <3 x half> %a, <3 x half> %b + ret <3 x half> %val +} + define <4 x half> @test_fmax_legacy_ugt_v4f16(<4 x half> %a, <4 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_v4f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v6, 16, v3 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v7, 16, v1 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v7, v6 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc -; 
GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v4, v0, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v1, v6, v1, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_v4f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_max_f16 v0, v0, v2 -; GFX9-NNAN-NEXT: v_pk_max_f16 v1, v1, v3 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmax_legacy_ugt_v4f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v6, 16, v3 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v7, 16, v1 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v7, v6 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 16, v4 -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 16, v6 -; VI-SAFE-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmax_legacy_ugt_v4f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_max_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_e32 v1, v1, v3 -; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v2 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v5 -; VI-NNAN-NEXT: v_or_b32_e32 v1, v1, v4 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmax_legacy_ugt_v4f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v4, v0 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v1, v5, v1 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v2, v6, v2 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v3, v7, v3 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmax_legacy_ugt_v4f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v7, v7 -; 
SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v4 -; SI-NNAN-NEXT: v_max_f32_e32 v1, v1, v5 -; SI-NNAN-NEXT: v_max_f32_e32 v2, v2, v6 -; SI-NNAN-NEXT: v_max_f32_e32 v3, v3, v7 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_v4f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1.h, v3.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.h, v2.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v0.l, v2.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, v1.l, v3.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.h, v3.h, v1.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, s0 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s1 -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s2 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_v4f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v5, v4 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v7, v6 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v3 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmax_legacy_ugt_v4f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_max_f16 v0, v0, v2 -; GFX11-NNAN-NEXT: v_pk_max_f16 v1, v1, v3 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmax_legacy_ugt_v4f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v3 +; GFX9-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v7, v6 +; GFX9-NEXT: 
v_cndmask_b32_e32 v6, v6, v7, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 +; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v4, v0, s4 +; GFX9-NEXT: v_perm_b32 v1, v6, v1, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v4f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v6, 16, v3 +; VI-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v7, v6 +; VI-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4 +; VI-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3 +; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2 +; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v4 +; VI-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v6 +; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v4f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_max_legacy_f32_e32 v0, v4, v0 +; SI-NEXT: v_max_legacy_f32_e32 v1, v5, v1 +; SI-NEXT: v_max_legacy_f32_e32 v2, v6, v2 +; SI-NEXT: v_max_legacy_f32_e32 v3, v7, v3 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_v4f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1.h, v3.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.h, v2.h +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v0.l, v2.l +; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, v1.l, v3.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, v3.h, v1.h, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, s0 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s1 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s2 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_v4f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | 
instid1(VALU_DEP_3) +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v5, v4 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v7, v6 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v3 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x5040100 +; GFX11-FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ugt <4 x half> %a, %b %val = select <4 x i1> %cmp, <4 x half> %a, <4 x half> %b ret <4 x half> %val } +define <4 x half> @test_fmax_legacy_ugt_v4f16_fast(<4 x half> %a, <4 x half> %b) #0 { +; GFX9-LABEL: test_fmax_legacy_ugt_v4f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_max_f16 v0, v0, v2 +; GFX9-NEXT: v_pk_max_f16 v1, v1, v3 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v4f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_max_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_max_f16_e32 v1, v1, v3 +; VI-NEXT: v_max_f16_e32 v0, v0, v2 +; VI-NEXT: v_or_b32_e32 v0, v0, v5 +; VI-NEXT: v_or_b32_e32 v1, v1, v4 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmax_legacy_ugt_v4f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_max_f32_e32 v0, v0, v4 +; SI-NEXT: v_max_f32_e32 v1, v1, v5 +; SI-NEXT: v_max_f32_e32 v2, v2, v6 +; SI-NEXT: v_max_f32_e32 v3, v3, v7 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmax_legacy_ugt_v4f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_max_f16 v0, v0, v2 +; GFX11-NEXT: v_pk_max_f16 v1, v1, v3 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ugt <4 x half> %a, %b + %val = select nnan nsz <4 x i1> %cmp, <4 x half> %a, <4 x half> %b + ret <4 x half> %val +} + define <8 x half> @test_fmax_legacy_ugt_v8f16(<8 x half> %a, <8 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_v8f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v14, 16, v7 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v15, 16, v3 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v12, 16, v6 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v13, 16, v2 -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v15, v14 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v10, 16, v5 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v11, 16, v1 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc -; 
GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v13, v12 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v8, 16, v4 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v9, 16, v0 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v11, v10 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v9, v8 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v3, v7 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v2, v6 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v5 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc -; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v4 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v8, v0, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v1, v10, v1, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v2, v12, v2, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v3, v14, v3, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_v8f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_max_f16 v0, v0, v4 -; GFX9-NNAN-NEXT: v_pk_max_f16 v1, v1, v5 -; GFX9-NNAN-NEXT: v_pk_max_f16 v2, v2, v6 -; GFX9-NNAN-NEXT: v_pk_max_f16 v3, v3, v7 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmax_legacy_ugt_v8f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v14, 16, v7 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v15, 16, v3 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v12, 16, v6 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v13, 16, v2 -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v15, v14 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v10, 16, v5 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v11, 16, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v13, v12 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v8, 16, v4 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v9, 16, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v11, v10 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v9, v8 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v3, v7 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v2, v6 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v5 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc -; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v4 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v8 -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v10 -; VI-SAFE-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v12 -; VI-SAFE-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v14 -; VI-SAFE-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmax_legacy_ugt_v8f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_max_f16_sdwa v8, 
v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_sdwa v9, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_sdwa v10, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_sdwa v11, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_max_f16_e32 v3, v3, v7 -; VI-NNAN-NEXT: v_max_f16_e32 v2, v2, v6 -; VI-NNAN-NEXT: v_max_f16_e32 v1, v1, v5 -; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v4 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v11 -; VI-NNAN-NEXT: v_or_b32_e32 v1, v1, v10 -; VI-NNAN-NEXT: v_or_b32_e32 v2, v2, v9 -; VI-NNAN-NEXT: v_or_b32_e32 v3, v3, v8 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmax_legacy_ugt_v8f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v8, v0 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v1, v9, v1 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v2, v10, v2 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v3, v11, v3 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v4, v12, v4 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v5, v13, v5 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v6, v14, v6 -; SI-SAFE-NEXT: v_max_legacy_f32_e32 v7, v15, v7 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmax_legacy_ugt_v8f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: 
v_cvt_f32_f16_e32 v15, v15 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v8 -; SI-NNAN-NEXT: v_max_f32_e32 v1, v1, v9 -; SI-NNAN-NEXT: v_max_f32_e32 v2, v2, v10 -; SI-NNAN-NEXT: v_max_f32_e32 v3, v3, v11 -; SI-NNAN-NEXT: v_max_f32_e32 v4, v4, v12 -; SI-NNAN-NEXT: v_max_f32_e32 v5, v5, v13 -; SI-NNAN-NEXT: v_max_f32_e32 v6, v6, v14 -; SI-NNAN-NEXT: v_max_f32_e32 v7, v7, v15 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_v8f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v4.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v1.h, v5.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v2.h, v6.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, v3.h, v7.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s3, v0.l, v4.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s4, v1.l, v5.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s5, v2.l, v6.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s6, v3.l, v7.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v3.h, v7.h, v3.h, s2 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v2.h, v6.h, v2.h, s1 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.h, v5.h, v1.h, s0 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v4.h, v0.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v4.l, v0.l, s3 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s4 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v2.l, v6.l, v2.l, s5 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v3.l, v7.l, v3.l, s6 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_v8f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v7 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v3 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v6 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v2 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v5 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1 -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v11, v10 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v13, v12 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v13, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v15, v14 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v9, v8 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v2, v6 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v4 -; 
GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v2, v11, v2, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v5 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v7 -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v1, v12, v1, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v8, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v3, v10, v3, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmax_legacy_ugt_v8f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_max_f16 v0, v0, v4 -; GFX11-NNAN-NEXT: v_pk_max_f16 v1, v1, v5 -; GFX11-NNAN-NEXT: v_pk_max_f16 v2, v2, v6 -; GFX11-NNAN-NEXT: v_pk_max_f16 v3, v3, v7 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmax_legacy_ugt_v8f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v7 +; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v3 +; GFX9-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; GFX9-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v15, v14 +; GFX9-NEXT: v_lshrrev_b32_e32 v10, 16, v5 +; GFX9-NEXT: v_lshrrev_b32_e32 v11, 16, v1 +; GFX9-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v13, v12 +; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v4 +; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v0 +; GFX9-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v11, v10 +; GFX9-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v9, v8 +; GFX9-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v3, v7 +; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v2, v6 +; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v1, v5 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc +; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v4 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v8, v0, s4 +; GFX9-NEXT: v_perm_b32 v1, v10, v1, s4 +; GFX9-NEXT: v_perm_b32 v2, v12, v2, s4 +; GFX9-NEXT: v_perm_b32 v3, v14, v3, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmax_legacy_ugt_v8f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v14, 16, v7 +; VI-NEXT: v_lshrrev_b32_e32 v15, 16, v3 +; VI-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; VI-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v15, v14 +; VI-NEXT: v_lshrrev_b32_e32 v10, 16, v5 +; VI-NEXT: v_lshrrev_b32_e32 v11, 16, v1 +; VI-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v13, v12 +; VI-NEXT: v_lshrrev_b32_e32 v8, 16, v4 +; VI-NEXT: v_lshrrev_b32_e32 v9, 16, v0 +; VI-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v11, v10 +; VI-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v9, v8 +; VI-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v3, v7 +; VI-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; VI-NEXT: v_cmp_nle_f16_e32 vcc, v2, v6 +; VI-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc 
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v1, v5
+; VI-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v4
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v8
+; VI-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v10
+; VI-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v12
+; VI-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v14
+; VI-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmax_legacy_ugt_v8f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v15, v15
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v14
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v12
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v11
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v10
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v9
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v8
+; SI-NEXT: v_max_legacy_f32_e32 v0, v8, v0
+; SI-NEXT: v_max_legacy_f32_e32 v1, v9, v1
+; SI-NEXT: v_max_legacy_f32_e32 v2, v10, v2
+; SI-NEXT: v_max_legacy_f32_e32 v3, v11, v3
+; SI-NEXT: v_max_legacy_f32_e32 v4, v12, v4
+; SI-NEXT: v_max_legacy_f32_e32 v5, v13, v5
+; SI-NEXT: v_max_legacy_f32_e32 v6, v14, v6
+; SI-NEXT: v_max_legacy_f32_e32 v7, v15, v7
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_v8f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v4.h
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v1.h, v5.h
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v2.h, v6.h
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, v3.h, v7.h
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s3, v0.l, v4.l
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s4, v1.l, v5.l
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s5, v2.l, v6.l
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s6, v3.l, v7.l
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v3.h, v7.h, v3.h, s2
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.h, v6.h, v2.h, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, v5.h, v1.h, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v4.h, v0.h, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v4.l, v0.l, s3
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s4
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.l, v6.l, v2.l, s5
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v3.l, v7.l, v3.l, s6
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_v8f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v11, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v13, v12
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v15, v14
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v9, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v2, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v11, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v7
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v12, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v8, v0, 0x5040100
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v10, v3, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = fcmp ugt <8 x half> %a, %b
%val = select <8 x i1> %cmp, <8 x half> %a, <8 x half> %b
ret <8 x half> %val
}
+define <8 x half> @test_fmax_legacy_ugt_v8f16_fast(<8 x half> %a, <8 x half> %b) #0 {
+; GFX9-LABEL: test_fmax_legacy_ugt_v8f16_fast:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v0, v0, v4
+; GFX9-NEXT: v_pk_max_f16 v1, v1, v5
+; GFX9-NEXT: v_pk_max_f16 v2, v2, v6
+; GFX9-NEXT: v_pk_max_f16 v3, v3, v7
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmax_legacy_ugt_v8f16_fast:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_max_f16_sdwa v8, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_max_f16_sdwa v9, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_max_f16_sdwa v10, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_max_f16_sdwa v11, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_max_f16_e32 v3, v3, v7
+; VI-NEXT: v_max_f16_e32 v2, v2, v6
+; VI-NEXT: v_max_f16_e32 v1, v1, v5
+; VI-NEXT: v_max_f16_e32 v0, v0, v4
+; VI-NEXT: v_or_b32_e32 v0, v0, v11
+; VI-NEXT: v_or_b32_e32 v1, v1, v10
+; VI-NEXT: v_or_b32_e32 v2, v2, v9
+; VI-NEXT: v_or_b32_e32 v3, v3, v8
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmax_legacy_ugt_v8f16_fast:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
+; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v15, v15
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v14
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v12
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v11
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v10
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v9
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v8
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_max_f32_e32 v0, v0, v8
+; SI-NEXT: v_max_f32_e32 v1, v1, v9
+; SI-NEXT: v_max_f32_e32 v2, v2, v10
+; SI-NEXT: v_max_f32_e32 v3, v3, v11
+; SI-NEXT: v_max_f32_e32 v4, v4, v12
+; SI-NEXT: v_max_f32_e32 v5, v5, v13
+; SI-NEXT: v_max_f32_e32 v6, v6, v14
+; SI-NEXT: v_max_f32_e32 v7, v7, v15
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_fmax_legacy_ugt_v8f16_fast:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_max_f16 v0, v0, v4
+; GFX11-NEXT: v_pk_max_f16 v1, v1, v5
+; GFX11-NEXT: v_pk_max_f16 v2, v2, v6
+; GFX11-NEXT: v_pk_max_f16 v3, v3, v7
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %cmp = fcmp ugt <8 x half> %a, %b
+ %val = select nnan nsz <8 x i1> %cmp, <8 x half> %a, <8 x half> %b
+ ret <8 x half> %val
+}
+
attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll b/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll
index eee2bd1..f3a84e6 100644
--- a/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll
@@ -1,8 +1,6 @@
-; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI-SAFE,GCN,FUNC %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=GCN-NONAN,GCN,FUNC %s
+; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI,GCN,FUNC %s
-; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-SAFE,GCN,FUNC %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=GCN-NONAN,GCN,FUNC %s
+; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI,GCN,FUNC %s
; RUN: llc -mtriple=r600 -mcpu=redwood < %s | FileCheck -enable-var-scope --check-prefixes=EG,FUNC %s
@@ -12,12 +10,10 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
-; VI-SAFE: v_cmp_nlt_f32_e32 vcc, [[A]], [[B]]
-; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
-
-; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; VI: v_cmp_nlt_f32_e32 vcc, [[A]], [[B]]
+; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
; EG: MAX
define amdgpu_kernel void @test_fmax_legacy_uge_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
@@ -34,18 +30,38 @@ define amdgpu_kernel void @test_fmax_legacy_uge_f32(ptr addrspace(1) %out, ptr a
ret void
}
+; FUNC-LABEL: {{^}}test_fmax_legacy_uge_f32_fast:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+
+; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+
+; EG: MAX
+define amdgpu_kernel void @test_fmax_legacy_uge_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid
+ %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1
+
+ %a = load volatile float, ptr addrspace(1) %gep.0, align 4
+ %b = load volatile float, ptr addrspace(1) %gep.1, align 4
+
+ %cmp = fcmp uge float %a, %b
+ %val = select nnan nsz i1 %cmp, float %a, float %b
+ store float %val, ptr addrspace(1) %out, align 4
+ ret void
+}
+
; FUNC-LABEL: {{^}}test_fmax_legacy_uge_f32_nnan_src:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
; GCN-DAG: v_add_f32_e32 [[ADD_A:v[0-9]+]], 1.0, [[A]]
; GCN-DAG: v_add_f32_e32 [[ADD_B:v[0-9]+]], 2.0, [[B]]
-; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]]
+; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]]
-; VI-SAFE: v_cmp_nlt_f32_e32 vcc, [[ADD_A]], [[ADD_B]]
-; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[ADD_B]], [[ADD_A]]
+; VI: v_cmp_nlt_f32_e32 vcc, [[ADD_A]], [[ADD_B]]
+; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[ADD_B]], [[ADD_A]]
-; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]]
; EG: MAX
define amdgpu_kernel void @test_fmax_legacy_uge_f32_nnan_src(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
@@ -64,16 +80,40 @@ define amdgpu_kernel void @test_fmax_legacy_uge_f32_nnan_src(ptr addrspace(1) %o
ret void
}
+; FUNC-LABEL: {{^}}test_fmax_legacy_uge_f32_nnan_src_fast:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN-DAG: v_add_f32_e32 [[ADD_A:v[0-9]+]], 1.0, [[A]]
+; GCN-DAG: v_add_f32_e32 [[ADD_B:v[0-9]+]], 2.0, [[B]]
+
+; GCN: v_max_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]]
+
+; EG: MAX
+define amdgpu_kernel void @test_fmax_legacy_uge_f32_nnan_src_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid
+ %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1
+
+ %a = load volatile float, ptr addrspace(1) %gep.0, align 4
+ %b = load volatile float, ptr addrspace(1) %gep.1, align 4
+ %a.nnan = fadd nnan float %a, 1.0
+ %b.nnan = fadd nnan float %b, 2.0
+
+ %cmp = fcmp uge float %a.nnan, %b.nnan
+ %val = select nnan nsz i1 %cmp, float %a.nnan, float %b.nnan
+ store float %val, ptr addrspace(1) %out, align 4
+ ret void
+}
+
; FUNC-LABEL: {{^}}test_fmax_legacy_oge_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
-; VI-SAFE: v_cmp_ge_f32_e32 vcc, [[A]], [[B]]
-; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
+; VI: v_cmp_ge_f32_e32 vcc, [[A]], [[B]]
+; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
-; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; EG: MAX
define amdgpu_kernel void @test_fmax_legacy_oge_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
@@ -89,17 +129,35 @@ define amdgpu_kernel void @test_fmax_legacy_oge_f32(ptr addrspace(1) %out, ptr a
ret void
}
-; FUNC-LABEL: {{^}}test_fmax_legacy_ugt_f32:
+; FUNC-LABEL: {{^}}test_fmax_legacy_oge_f32_fast:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; EG: MAX
+define amdgpu_kernel void @test_fmax_legacy_oge_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid
+ %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1
+
+ %a = load volatile float, ptr addrspace(1) %gep.0, align 4
+ %b = load volatile float, ptr addrspace(1) %gep.1, align 4
-; VI-SAFE: v_cmp_nle_f32_e32 vcc, [[A]], [[B]]
-; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
+ %cmp = fcmp oge float %a, %b
+ %val = select nnan nsz i1 %cmp, float %a, float %b
+ store float %val, ptr addrspace(1) %out, align 4
+ ret void
+}
+; FUNC-LABEL: {{^}}test_fmax_legacy_ugt_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+
+; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+
+; VI: v_cmp_nle_f32_e32 vcc, [[A]], [[B]]
+; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
-; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; EG: MAX
define amdgpu_kernel void @test_fmax_legacy_ugt_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
@@ -115,16 +173,35 @@ define amdgpu_kernel void @test_fmax_legacy_ugt_f32(ptr addrspace(1) %out, ptr a
ret void
}
+; FUNC-LABEL: {{^}}test_fmax_legacy_ugt_f32_fast:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+
+; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; EG: MAX
+define amdgpu_kernel void @test_fmax_legacy_ugt_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid
+ %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1
+
+ %a = load volatile float, ptr addrspace(1) %gep.0, align 4
+ %b = load volatile float, ptr addrspace(1) %gep.1, align 4
+
+ %cmp = fcmp ugt float %a, %b
+ %val = select nnan nsz i1 %cmp, float %a, float %b
+ store float %val, ptr addrspace(1) %out, align 4
+ ret void
+}
+
; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
-; VI-SAFE: v_cmp_gt_f32_e32 vcc, [[A]], [[B]]
-; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
+; VI: v_cmp_gt_f32_e32 vcc, [[A]], [[B]]
+; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
-; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; EG: MAX
define amdgpu_kernel void @test_fmax_legacy_ogt_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
@@ -140,17 +217,35 @@ define amdgpu_kernel void @test_fmax_legacy_ogt_f32(ptr addrspace(1) %out, ptr a
ret void
}
-; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v1f32:
+; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_f32_fast:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; EG: MAX
+define amdgpu_kernel void @test_fmax_legacy_ogt_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid
+ %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1
+
+ %a = load volatile float, ptr addrspace(1) %gep.0, align 4
+ %b = load volatile float, ptr addrspace(1) %gep.1, align 4
-; VI-SAFE: v_cmp_gt_f32_e32 vcc, [[A]], [[B]]
-; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
+ %cmp = fcmp ogt float %a, %b
+ %val = select nnan nsz i1 %cmp, float %a, float %b
+ store float %val, ptr addrspace(1) %out, align 4
+ ret void
+}
+; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v1f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+
+; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+
+; VI: v_cmp_gt_f32_e32 vcc, [[A]], [[B]]
+; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
-; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; EG: MAX
define amdgpu_kernel void @test_fmax_legacy_ogt_v1f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
@@ -166,23 +261,39 @@ define amdgpu_kernel void @test_fmax_legacy_ogt_v1f32(ptr addrspace(1) %out, ptr
ret void
}
+; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v1f32_fast:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+
+; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; EG: MAX
+define amdgpu_kernel void @test_fmax_legacy_ogt_v1f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %gep.0 = getelementptr <1 x float>, ptr addrspace(1) %in, i32 %tid
+ %gep.1 = getelementptr <1 x float>, ptr addrspace(1) %gep.0, i32 1
+
+ %a = load volatile <1 x float>, ptr addrspace(1) %gep.0
+ %b = load volatile <1 x float>, ptr addrspace(1) %gep.1
+
+ %cmp = fcmp ogt <1 x float> %a, %b
+ %val = select nnan nsz <1 x i1> %cmp, <1 x float> %a, <1 x float> %b
+ store <1 x float> %val, ptr addrspace(1) %out
+ ret void
+}
+
; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v3f32:
-; SI-SAFE: v_max_legacy_f32_e32
-; SI-SAFE: v_max_legacy_f32_e32
-; SI-SAFE: v_max_legacy_f32_e32
-
-; VI-SAFE: v_cmp_gt_f32_e32
-; VI-SAFE: v_cndmask_b32_e32
-; VI-SAFE: v_cmp_gt_f32_e32
-; VI-SAFE: v_cndmask_b32_e32
-; VI-SAFE: v_cmp_gt_f32_e32
-; VI-SAFE: v_cndmask_b32_e32
-; VI-SAFE-NOT: v_cmp
-; VI-SAFE-NOT: v_cndmask
-
-; GCN-NONAN: v_max_f32_e32
-; GCN-NONAN: v_max_f32_e32
-; GCN-NONAN: v_max_f32_e32
+; SI: v_max_legacy_f32_e32
+; SI: v_max_legacy_f32_e32
+; SI: v_max_legacy_f32_e32
+
+; VI: v_cmp_gt_f32_e32
+; VI: v_cndmask_b32_e32
+; VI: v_cmp_gt_f32_e32
+; VI: v_cndmask_b32_e32
+; VI: v_cmp_gt_f32_e32
+; VI: v_cndmask_b32_e32
+; VI-NOT: v_cmp
+; VI-NOT: v_cndmask
; GCN-NOT: v_max
define amdgpu_kernel void @test_fmax_legacy_ogt_v3f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
@@ -199,6 +310,27 @@ define amdgpu_kernel void @test_fmax_legacy_ogt_v3f32(ptr addrspace(1) %out, ptr
ret void
}
+; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v3f32_fast:
+
+; GCN: v_max_f32_e32
+; GCN: v_max_f32_e32
+; GCN: v_max_f32_e32
+
+; GCN-NOT: v_max
+define amdgpu_kernel void @test_fmax_legacy_ogt_v3f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %gep.0 = getelementptr <3 x float>, ptr addrspace(1) %in, i32 %tid
+ %gep.1 = getelementptr <3 x float>, ptr addrspace(1) %gep.0, i32 1
+
+ %a = load <3 x float>, ptr addrspace(1) %gep.0
+ %b = load <3 x float>, ptr addrspace(1) %gep.1
+
+ %cmp = fcmp ogt <3 x float> %a, %b
+ %val = select nnan nsz <3 x i1> %cmp, <3 x float> %a, <3 x float> %b
+ store <3 x float> %val, ptr addrspace(1) %out
+ ret void
+}
+
; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_f32_multi_use:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
diff --git a/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll b/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll
index 2ac5891..37f077d5 100644
--- a/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll
@@ -1,16 +1,12 @@
-; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI-SAFE,GCN %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn < %s | FileCheck -enable-var-scope --check-prefixes=GCN %s
+; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI,GCN %s
-; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-SAFE,GCN %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope --check-prefixes=GCN,VI-NNAN %s
+; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI,GCN %s
; GCN-LABEL: {{^}}min_fneg_select_regression_0:
; GCN-NOT: v_mul
-; SI: v_max_legacy_f32_e64 [[MIN:v[0-9]+]], -1.0, -v0
-
-; VI-SAFE: v_cmp_nle_f32_e32 vcc, 1.0, v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc
+; VI: v_cmp_nle_f32_e32 vcc, 1.0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc
define amdgpu_ps float @min_fneg_select_regression_0(float %a, float %b) #0 {
%fneg.a = fsub float -0.0, %a
%cmp.a = fcmp ult float %a, 1.0
@@ -18,15 +14,23 @@ define amdgpu_ps float @min_fneg_select_regression_0(float %a, float %b) #0 {
ret float %min.a
}
+; GCN-LABEL: {{^}}min_fneg_select_regression_0_fast:
+; GCN-NOT: v_mul
+
+define amdgpu_ps float @min_fneg_select_regression_0_fast(float %a, float %b) #0 {
+ %fneg.a = fsub float -0.0, %a
+ %cmp.a = fcmp ult float %a, 1.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float -1.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}min_fneg_select_regression_posk_0:
; GCN-NOT: v_mul
; SI: v_max_legacy_f32_e64 [[MIN:v[0-9]+]], 1.0, -v0
-; VI-SAFE: v_cmp_nle_f32_e32 vcc, -1.0, v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
-
-; VI-NNAN: v_max_f32_e64 v{{[0-9]+}}, -v0, 1.0
+; VI: v_cmp_nle_f32_e32 vcc, -1.0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
define amdgpu_ps float @min_fneg_select_regression_posk_0(float %a, float %b) #0 {
%fneg.a = fsub float -0.0, %a
%cmp.a = fcmp ult float %a, -1.0
@@ -34,15 +38,24 @@ define amdgpu_ps float @min_fneg_select_regression_posk_0(float %a, float %b) #0
ret float %min.a
}
-; GCN-LABEL: {{^}}max_fneg_select_regression_0:
+; GCN-LABEL: {{^}}min_fneg_select_regression_posk_0_fast:
; GCN-NOT: v_mul
-; SI-SAFE: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], -1.0, -v0
+; VI: v_max_f32_e64 v{{[0-9]+}}, -v0, 1.0
+define amdgpu_ps float @min_fneg_select_regression_posk_0_fast(float %a, float %b) #0 {
+ %fneg.a = fsub float -0.0, %a
+ %cmp.a = fcmp ult float %a, -1.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0
+ ret float %min.a
+}
+
+; GCN-LABEL: {{^}}max_fneg_select_regression_0:
+; GCN-NOT: v_mul
-; VI-SAFE: v_cmp_nge_f32_e32 vcc, 1.0, v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc
+; SI: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], -1.0, -v0
-; GCN-NONAN: v_min_f32_e64 [[MIN:v[0-9]+]], -v0, -1.0
+; VI: v_cmp_nge_f32_e32 vcc, 1.0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc
define amdgpu_ps float @max_fneg_select_regression_0(float %a) #0 {
%fneg.a = fsub float -0.0, %a
%cmp.a = fcmp ugt float %a, 1.0
@@ -50,15 +63,24 @@ define amdgpu_ps float @max_fneg_select_regression_0(float %a) #0 {
ret float %min.a
}
-; GCN-LABEL: {{^}}max_fneg_select_regression_posk_0:
+; GCN-LABEL: {{^}}max_fneg_select_regression_0_fast:
; GCN-NOT: v_mul
-; SI-SAFE: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], 1.0, -v0
+; GCN: v_min_f32_e64 [[MIN:v[0-9]+]], -v0, -1.0
+define amdgpu_ps float @max_fneg_select_regression_0_fast(float %a) #0 {
+ %fneg.a = fsub float -0.0, %a
+ %cmp.a = fcmp ugt float %a, 1.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float -1.0
+ ret float %min.a
+}
+
+; GCN-LABEL: {{^}}max_fneg_select_regression_posk_0:
+; GCN-NOT: v_mul
-; VI-SAFE: v_cmp_nge_f32_e32 vcc, -1.0, v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
+; SI: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], 1.0, -v0
-; GCN-NONAN: v_min_f32_e64 [[MIN:v[0-9]+]], -v0, 1.0
+; VI: v_cmp_nge_f32_e32 vcc, -1.0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
define amdgpu_ps float @max_fneg_select_regression_posk_0(float %a) #0 {
%fneg.a = fsub float -0.0, %a
%cmp.a = fcmp ugt float %a, -1.0
@@ -66,13 +88,22 @@ define amdgpu_ps float @max_fneg_select_regression_posk_0(float %a) #0 {
ret float %min.a
}
+; GCN-LABEL: {{^}}max_fneg_select_regression_posk_0_fast:
+; GCN-NOT: v_mul
+
+; GCN: v_min_f32_e64 [[MIN:v[0-9]+]], -v0, 1.0
+define amdgpu_ps float @max_fneg_select_regression_posk_0_fast(float %a) #0 {
+ %fneg.a = fsub float -0.0, %a
+ %cmp.a = fcmp ugt float %a, -1.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg1:
; SI: v_min_legacy_f32_e64 v0, 1.0, -v0
-; VI-SAFE: v_cmp_nge_f32_e32 vcc, -1.0, v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
-
-; VI-NNAN: v_min_f32_e64 v0, -v0, 1.0
+; VI: v_cmp_nge_f32_e32 vcc, -1.0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg1(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp ugt float %a, -1.0
@@ -80,13 +111,21 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg1(float %a, float %b) #0
ret float %min.a
}
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg1_fast:
+
+; VI: v_min_f32_e64 v0, -v0, 1.0
+define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg1_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp ugt float %a, -1.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ult_a_neg1:
; SI: v_max_legacy_f32_e64 v0, 1.0, -v0
-; VI-SAFE: v_cmp_nle_f32_e32 vcc, -1.0, v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
-
-; VI-NNAN: v_max_f32_e64 v0, -v0, 1.0
+; VI: v_cmp_nle_f32_e32 vcc, -1.0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg1(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp ult float %a, -1.0
@@ -94,13 +133,21 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg1(float %a, float %b) #0
ret float %min.a
}
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ult_a_neg1_fast:
+
+; VI: v_max_f32_e64 v0, -v0, 1.0
+define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg1_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp ult float %a, -1.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg1:
; SI: v_min_legacy_f32_e64 v0, -v0, 1.0
-; VI-SAFE: v_cmp_lt_f32_e32 vcc, -1.0, v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
-
-; VI-NNAN: v_min_f32_e64 v0, -v0, 1.0
+; VI: v_cmp_lt_f32_e32 vcc, -1.0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg1(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp ogt float %a, -1.0
@@ -108,13 +155,21 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg1(float %a, float %b) #0
ret float %min.a
}
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg1_fast:
+
+; VI: v_min_f32_e64 v0, -v0, 1.0
+define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg1_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp ogt float %a, -1.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg1:
; SI: v_max_legacy_f32_e64 v0, -v0, 1.0
-; VI-SAFE: v_cmp_gt_f32_e32 vcc, -1.0, v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
-
-; VI-NANN: v_max_f32_e64 v0, -v0, 1.0
+; VI: v_cmp_gt_f32_e32 vcc, -1.0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg1(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp olt float %a, -1.0
@@ -122,17 +177,24 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg1(float %a, float %b) #0
ret float %min.a
}
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg1_fast:
+
+; VI: v_max_f32_e64 v0, -v0, 1.0
+define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg1_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp olt float %a, -1.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg8:
; SI: s_mov_b32 [[K:s[0-9]+]], 0x41000000
; SI-NEXT: v_min_legacy_f32_e64 v0, [[K]], -v0
-; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000
-; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000
-; VI-SAFE: v_cmp_nge_f32_e32 vcc, [[K0]], v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc
-
-; VI-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000
-; VI-NNAN-NEXT: v_min_f32_e64 v0, -v0, [[K]]
+; VI-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000
+; VI-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000
+; VI: v_cmp_nge_f32_e32 vcc, [[K0]], v0
+; VI-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc
define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg8(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp ugt float %a, -8.0
@@ -140,17 +202,25 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg8(float %a, float %b) #0
ret float %min.a
}
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg8_fast:
+
+; VI: s_mov_b32 [[K:s[0-9]+]], 0x41000000
+; VI-NEXT: v_min_f32_e64 v0, -v0, [[K]]
+define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg8_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp ugt float %a, -8.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 8.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ult_a_neg8:
; SI: s_mov_b32 [[K:s[0-9]+]], 0x41000000
; SI-NEXT: v_max_legacy_f32_e64 v0, [[K]], -v0
-; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000
-; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000
-; VI-SAFE: v_cmp_nle_f32_e32 vcc, [[K0]], v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc
-
-; VI-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000
-; VI-NNAN-NEXT: v_max_f32_e64 v0, -v0, [[K]]
+; VI-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000
+; VI-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000
+; VI: v_cmp_nle_f32_e32 vcc, [[K0]], v0
+; VI-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc
define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg8(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp ult float %a, -8.0
@@ -158,17 +228,25 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg8(float %a, float %b) #0
ret float %min.a
}
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ult_a_neg8_fast:
+
+; VI: s_mov_b32 [[K:s[0-9]+]], 0x41000000
+; VI-NEXT: v_max_f32_e64 v0, -v0, [[K]]
+define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg8_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp ult float %a, -8.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 8.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg8:
; SI: s_mov_b32 [[K:s[0-9]+]], 0x41000000
; SI-NEXT: v_min_legacy_f32_e64 v0, -v0, [[K]]
-; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000
-; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000
-; VI-SAFE: v_cmp_lt_f32_e32 vcc, [[K0]], v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc
-
-; VI-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000
-; VI-NNAN-NEXT: v_min_f32_e64 v0, -v0, [[K]]
+; VI-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000
+; VI-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000
+; VI: v_cmp_lt_f32_e32 vcc, [[K0]], v0
+; VI-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc
define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg8(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp ogt float %a, -8.0
@@ -176,18 +254,26 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg8(float %a, float %b) #0
ret float %min.a
}
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg8_fast:
+
+; VI: s_mov_b32 [[K:s[0-9]+]], 0x41000000
+; VI-NEXT: v_min_f32_e64 v0, -v0, [[K]]
+define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg8_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp ogt float %a, -8.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 8.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg8:
; SI: s_mov_b32 [[K:s[0-9]+]], 0x41000000
; SI-NEXT: v_max_legacy_f32_e64 v0, -v0, [[K]]
-; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000
-; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000
-; VI-SAFE: v_cmp_gt_f32_e32 vcc, [[K0]], v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc
-
-; VI-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000
-; VI-NNAN-NEXT: v_max_f32_e64 v0, -v0, [[K]]
+; VI-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000
+; VI-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000
+; VI: v_cmp_gt_f32_e32 vcc, [[K0]], v0
+; VI-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc
define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg8(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp olt float %a, -8.0
@@ -195,13 +281,22 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg8(float %a, float %b) #0
ret float %min.a
}
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg8_fast:
+
+; VI: s_mov_b32 [[K:s[0-9]+]], 0x41000000
+; VI-NEXT: v_max_f32_e64 v0, -v0, [[K]]
+define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg8_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp olt float %a, -8.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 8.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}select_fneg_a_or_neg1_cmp_olt_a_1:
; SI: v_max_legacy_f32_e64 v0, -v0, -1.0
-; VI-SAFE: v_cmp_gt_f32_e32 vcc, 1.0, v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc
-
-; VI-NNAN: v_max_f32_e64 v0, -v0, -1.0
+; VI: v_cmp_gt_f32_e32 vcc, 1.0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc
define amdgpu_ps float @select_fneg_a_or_neg1_cmp_olt_a_1(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp olt float %a, 1.0
@@ -209,15 +304,22 @@ define amdgpu_ps float @select_fneg_a_or_neg1_cmp_olt_a_1(float %a, float %b) #0
ret float %min.a
}
+; GCN-LABEL: {{^}}select_fneg_a_or_neg1_cmp_olt_a_1_fast:
+
+; VI: v_max_f32_e64 v0, -v0, -1.0
+define amdgpu_ps float @select_fneg_a_or_neg1_cmp_olt_a_1_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp olt float %a, 1.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float -1.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}ult_a_select_fneg_a_b:
; SI: v_cmp_nge_f32_e32 vcc, v0, v1
; SI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
-; VI-SAFE: v_cmp_nge_f32_e32 vcc, v0, v1
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
-
-; VI-NNAN: v_cmp_lt_f32_e32 vcc, v0, v1
-; VI-NNAN-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
+; VI: v_cmp_nge_f32_e32 vcc, v0, v1
+; VI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
define amdgpu_ps float @ult_a_select_fneg_a_b(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp ult float %a, %b
@@ -225,15 +327,23 @@ define amdgpu_ps float @ult_a_select_fneg_a_b(float %a, float %b) #0 {
ret float %min.a
}
+; GCN-LABEL: {{^}}ult_a_select_fneg_a_b_fast:
+
+; VI: v_cmp_lt_f32_e32 vcc, v0, v1
+; VI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
+define amdgpu_ps float @ult_a_select_fneg_a_b_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp nnan nsz ult float %a, %b
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float %b
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}ugt_a_select_fneg_a_b:
; SI: v_cmp_nle_f32_e32 vcc, v0, v1
; SI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
-; VI-SAFE: v_cmp_nle_f32_e32 vcc, v0, v1
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
-
-; VI-NNAN: v_cmp_gt_f32_e32 vcc, v0, v1
-; VI-NNAN-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
+; VI: v_cmp_nle_f32_e32 vcc, v0, v1
+; VI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
define amdgpu_ps float @ugt_a_select_fneg_a_b(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp ugt float %a, %b
@@ -241,5 +351,16 @@ define amdgpu_ps float @ugt_a_select_fneg_a_b(float %a, float %b) #0 {
ret float %min.a
}
+; GCN-LABEL: {{^}}ugt_a_select_fneg_a_b_fast:
+
+; VI: v_cmp_gt_f32_e32 vcc, v0, v1
+; VI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
+define amdgpu_ps float @ugt_a_select_fneg_a_b_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp nnan nsz ugt float %a, %b
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float %b
+ ret float %min.a
+}
+
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
diff --git a/llvm/test/CodeGen/AMDGPU/fmin_legacy.f16.ll b/llvm/test/CodeGen/AMDGPU/fmin_legacy.f16.ll
index 34cb0b1..40c2ec0 100644
--- a/llvm/test/CodeGen/AMDGPU/fmin_legacy.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmin_legacy.f16.ll
@@ -1,735 +1,760 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-SAFE %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-NNAN %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s
-; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI-SAFE %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-NNAN %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI %s
-; RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI-SAFE %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefixes=SI-NNAN %s
+; RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI %s
-; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SAFE-TRUE16 %s
-; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SAFE-FAKE16 %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-NNAN,GFX11-NNAN-TRUE16 %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-NNAN,GFX11-NNAN-FAKE16 %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-FAKE16 %s
define half @test_fmin_legacy_ule_f16(half %a, half %b) #0 {
-; GFX9-SAFE-LABEL: test_fmin_legacy_ule_f16:
-; GFX9-SAFE: ; %bb.0:
-; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-NNAN-LABEL: test_fmin_legacy_ule_f16:
-; GFX9-NNAN: ; %bb.0:
-; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NNAN-NEXT: v_min_f16_e32 v0, v0, v1
-; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SAFE-LABEL: test_fmin_legacy_ule_f16:
-; VI-SAFE: ; %bb.0:
-; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; VI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-NNAN-LABEL: test_fmin_legacy_ule_f16:
-; VI-NNAN: ; %bb.0:
-; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v1
-; VI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SAFE-LABEL: test_fmin_legacy_ule_f16:
-; SI-SAFE: ; %bb.0:
-; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v1, v0
-; SI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-NNAN-LABEL: test_fmin_legacy_ule_f16:
-; SI-NNAN: ; %bb.0:
-; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v1
-; SI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_f16:
-; GFX11-SAFE-TRUE16: ; %bb.0:
-; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.l, v1.l
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, vcc_lo
-; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_f16:
-; GFX11-SAFE-FAKE16: ; %bb.0:
-; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v1
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-TRUE16-LABEL: test_fmin_legacy_ule_f16:
-; GFX11-NNAN-TRUE16: ; %bb.0:
-; GFX11-NNAN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-TRUE16-NEXT: v_min_f16_e32 v0.l, v0.l, v1.l
-; GFX11-NNAN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-FAKE16-LABEL: test_fmin_legacy_ule_f16:
-; GFX11-NNAN-FAKE16: ; %bb.0:
-; GFX11-NNAN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-FAKE16-NEXT: v_min_f16_e32 v0, v0, v1
-; GFX11-NNAN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: test_fmin_legacy_ule_f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmin_legacy_ule_f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1
+; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmin_legacy_ule_f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_min_legacy_f32_e32 v0, v1, v0
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.l, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, vcc_lo
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = fcmp ule half %a, %b
%val = select i1 %cmp, half %a, half %b
ret half %val
}
+define half @test_fmin_legacy_ule_f16_fast(half %a, half %b) #0 {
+; GFX9-LABEL: test_fmin_legacy_ule_f16_fast:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmin_legacy_ule_f16_fast:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_min_f16_e32 v0, v0, v1
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmin_legacy_ule_f16_fast:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_min_f32_e32 v0, v0, v1
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_f16_fast:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v0.l, v0.l, v1.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_f16_fast:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %cmp = fcmp ule half %a, %b
+ %val = select nnan nsz i1 %cmp, half %a, half %b
+ ret half %val
+}
+
define <2 x half> @test_fmin_legacy_ule_v2f16(<2 x half> %a, <2 x half> %b) #0 {
-; GFX9-SAFE-LABEL: test_fmin_legacy_ule_v2f16:
-; GFX9-SAFE: ; %bb.0:
-; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v2
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100
-; GFX9-SAFE-NEXT: v_perm_b32 v0, v2, v0, s4
-; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-NNAN-LABEL: test_fmin_legacy_ule_v2f16:
-; GFX9-NNAN: ; %bb.0:
-; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NNAN-NEXT: v_pk_min_f16 v0, v0, v1
-; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SAFE-LABEL: test_fmin_legacy_ule_v2f16:
-; VI-SAFE: ; %bb.0:
-; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v2
-; VI-SAFE-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-NNAN-LABEL: test_fmin_legacy_ule_v2f16:
-; VI-NNAN: ; %bb.0:
-; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NNAN-NEXT: v_min_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v1
-; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v2
-; VI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SAFE-LABEL: test_fmin_legacy_ule_v2f16:
-; SI-SAFE: ; %bb.0:
-; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v2, v0
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v1, v3, v1
-; SI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-NNAN-LABEL: test_fmin_legacy_ule_v2f16:
-; SI-NNAN: ; %bb.0:
-; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v2
-; SI-NNAN-NEXT: v_min_f32_e32 v1, v1, v3
-; SI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_v2f16:
-; GFX11-SAFE-TRUE16: ; %bb.0:
-; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v1.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.l, v1.l
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v1.h, v0.h, vcc_lo
-; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, s0
-; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_v2f16:
-; GFX11-SAFE-FAKE16: ; %bb.0:
-; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v3, v2
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v1
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-LABEL: test_fmin_legacy_ule_v2f16:
-; GFX11-NNAN: ; %bb.0:
-; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-NEXT: v_pk_min_f16 v0, v0, v1
-; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: test_fmin_legacy_ule_v2f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v0, v2, v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmin_legacy_ule_v2f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v2
+; VI-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1
+; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; VI-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmin_legacy_ule_v2f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_min_legacy_f32_e32 v0, v2, v0
+; SI-NEXT: v_min_legacy_f32_e32 v1, v3, v1
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_v2f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v1.h
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.l, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v1.h, v0.h, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_v2f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v3, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = fcmp ule <2 x half> %a, %b
%val = select <2 x i1> %cmp, <2 x half> %a, <2 x half> %b
ret <2 x half> %val
}
+define <2 x half> @test_fmin_legacy_ule_v2f16_fast(<2 x half> %a, <2 x half> %b) #0 {
+; GFX9-LABEL: test_fmin_legacy_ule_v2f16_fast:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_f16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmin_legacy_ule_v2f16_fast:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_min_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_min_f16_e32 v0, v0, v1
+; VI-NEXT: v_or_b32_e32 v0, v0, v2
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmin_legacy_ule_v2f16_fast:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_min_f32_e32 v0, v0, v2
+; SI-NEXT: v_min_f32_e32 v1, v1, v3
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_fmin_legacy_ule_v2f16_fast:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_min_f16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %cmp = fcmp ule <2 x half> %a, %b
+ %val = select nnan nsz <2 x i1> %cmp, <2 x half> %a, <2 x half> %b
+ ret <2 x half> %val
+}
+
define <3 x half> @test_fmin_legacy_ule_v3f16(<3 x half> %a, <3 x half> %b) #0 {
-; GFX9-SAFE-LABEL: test_fmin_legacy_ule_v3f16:
-; GFX9-SAFE: ; %bb.0:
-; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
-; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100
-; GFX9-SAFE-NEXT: v_perm_b32 v0, v4, v0, s4
-; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-NNAN-LABEL: test_fmin_legacy_ule_v3f16:
-; GFX9-NNAN: ; %bb.0:
-; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NNAN-NEXT: v_pk_min_f16 v1, v1, v3
-; GFX9-NNAN-NEXT: v_pk_min_f16 v0, v0, v2
-; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SAFE-LABEL: test_fmin_legacy_ule_v3f16:
-; VI-SAFE: ; %bb.0:
-; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4
-; VI-SAFE-NEXT: v_cndmask_b32_sdwa v4, v4, v5, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
-; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-NNAN-LABEL: test_fmin_legacy_ule_v3f16:
-; VI-NNAN: ; %bb.0:
-; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NNAN-NEXT: v_min_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v2
-; VI-NNAN-NEXT: v_min_f16_e32 v1, v1, v3
-; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v4
-; VI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SAFE-LABEL: test_fmin_legacy_ule_v3f16:
-; SI-SAFE: ; %bb.0:
-; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v3, v0
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v1, v4, v1
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v2, v5, v2
-; SI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-NNAN-LABEL: test_fmin_legacy_ule_v3f16:
-; SI-NNAN: ; %bb.0:
-; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v3
-; SI-NNAN-NEXT: v_min_f32_e32 v1, v1, v4
-; SI-NNAN-NEXT: v_min_f32_e32 v2, v2, v5
-; SI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_v3f16:
-; GFX11-SAFE-TRUE16: ; %bb.0:
-; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v2.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.l, v2.l
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v1.l, v3.l
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, vcc_lo
-; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s0
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s1
-; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_v3f16:
-; GFX11-SAFE-FAKE16: ; %bb.0:
-; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v5, v4
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v3
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-LABEL: test_fmin_legacy_ule_v3f16:
-; GFX11-NNAN: ; %bb.0:
-; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-NEXT: v_pk_min_f16 v0, v0, v2
-; GFX11-NNAN-NEXT: v_pk_min_f16 v1, v1, v3
-; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: test_fmin_legacy_ule_v3f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v0, v4, v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmin_legacy_ule_v3f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4
+; VI-NEXT: v_cndmask_b32_sdwa v4, v4, v5, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3
+; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2
+; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; VI-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmin_legacy_ule_v3f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_min_legacy_f32_e32 v0, v3, v0
+; SI-NEXT: v_min_legacy_f32_e32 v1, v4, v1
+; SI-NEXT: v_min_legacy_f32_e32 v2, v5, v2
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_v3f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v2.h
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.l, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v1.l, v3.l
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_v3f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v5, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = fcmp ule <3 x half> %a, %b
%val = select <3 x i1> %cmp, <3 x half> %a, <3 x half> %b
ret <3 x half> %val
}
+define <3 x half> @test_fmin_legacy_ule_v3f16_fast(<3 x half> %a, <3 x half> %b) #0 {
+; GFX9-LABEL: test_fmin_legacy_ule_v3f16_fast:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v3, v3, v3
+; GFX9-NEXT: v_pk_max_f16 v1, v1, v1
+; GFX9-NEXT: v_pk_min_f16 v1, v1, v3
+; GFX9-NEXT: v_pk_min_f16 v0, v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmin_legacy_ule_v3f16_fast:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_min_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_min_f16_e32 v0, v0, v2
+; VI-NEXT: v_min_f16_e32 v1, v1, v3
+; VI-NEXT: v_or_b32_e32 v0, v0, v4
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmin_legacy_ule_v3f16_fast:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_min_f32_e32 v0, v0, v3
+; SI-NEXT: v_min_f32_e32 v1, v1, v4
+; SI-NEXT: v_min_f32_e32 v2, v2, v5
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_fmin_legacy_ule_v3f16_fast:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_max_f16 v3, v3, v3
+; GFX11-NEXT: v_pk_max_f16 v1, v1, v1
+; GFX11-NEXT: v_pk_min_f16 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_pk_min_f16 v1, v1, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %cmp = fcmp ule <3 x half> %a, %b
+ %val = select nnan nsz <3 x i1> %cmp, <3 x half> %a, <3 x half> %b
+ ret <3 x half> %val
+}
+
define <4 x half> @test_fmin_legacy_ule_v4f16(<4 x half> %a, <4 x half> %b) #0 {
-; GFX9-SAFE-LABEL: test_fmin_legacy_ule_v4f16:
-; GFX9-SAFE: ; %bb.0:
-; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v6, 16, v3
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v7, 16, v1
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v7, v6
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
-; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100
-; GFX9-SAFE-NEXT: v_perm_b32 v0, v4, v0, s4
-; GFX9-SAFE-NEXT: v_perm_b32 v1, v6, v1, s4
-; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-NNAN-LABEL: test_fmin_legacy_ule_v4f16:
-; GFX9-NNAN: ; %bb.0:
-; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0)
lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_min_f16 v0, v0, v2 -; GFX9-NNAN-NEXT: v_pk_min_f16 v1, v1, v3 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmin_legacy_ule_v4f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v6, 16, v3 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v7, 16, v1 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v7, v6 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 16, v4 -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 16, v6 -; VI-SAFE-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmin_legacy_ule_v4f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_min_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_e32 v1, v1, v3 -; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v2 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v5 -; VI-NNAN-NEXT: v_or_b32_e32 v1, v1, v4 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmin_legacy_ule_v4f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v4, v0 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v1, v5, v1 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v2, v6, v2 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v3, v7, v3 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmin_legacy_ule_v4f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NNAN-NEXT: 
v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v4 -; SI-NNAN-NEXT: v_min_f32_e32 v1, v1, v5 -; SI-NNAN-NEXT: v_min_f32_e32 v2, v2, v6 -; SI-NNAN-NEXT: v_min_f32_e32 v3, v3, v7 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_v4f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1.h, v3.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.h, v2.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v0.l, v2.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s2, v1.l, v3.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.h, v3.h, v1.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, s0 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s1 -; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s2 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_v4f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v5, v4 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v7, v6 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v3 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmin_legacy_ule_v4f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_min_f16 v0, v0, v2 -; GFX11-NNAN-NEXT: v_pk_min_f16 v1, v1, v3 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmin_legacy_ule_v4f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v3 +; GFX9-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v7, v6 +; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 +; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v4, v0, s4 +; GFX9-NEXT: v_perm_b32 v1, v6, v1, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v4f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v6, 16, 
v3 +; VI-NEXT: v_lshrrev_b32_e32 v7, 16, v1 +; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2 +; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v0 +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v7, v6 +; VI-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4 +; VI-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3 +; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2 +; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc +; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v4 +; VI-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v6 +; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v4f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_min_legacy_f32_e32 v0, v4, v0 +; SI-NEXT: v_min_legacy_f32_e32 v1, v5, v1 +; SI-NEXT: v_min_legacy_f32_e32 v2, v6, v2 +; SI-NEXT: v_min_legacy_f32_e32 v3, v7, v3 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_v4f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1.h, v3.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.h, v2.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v0.l, v2.l +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s2, v1.l, v3.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, v3.h, v1.h, vcc_lo +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, s0 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s1 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s2 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_v4f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v5, v4 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v7, v6 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v3 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x5040100 +; 
GFX11-FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ule <4 x half> %a, %b %val = select <4 x i1> %cmp, <4 x half> %a, <4 x half> %b ret <4 x half> %val } +define <4 x half> @test_fmin_legacy_ule_v4f16_fast(<4 x half> %a, <4 x half> %b) #0 { +; GFX9-LABEL: test_fmin_legacy_ule_v4f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_min_f16 v0, v0, v2 +; GFX9-NEXT: v_pk_min_f16 v1, v1, v3 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v4f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_min_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_e32 v1, v1, v3 +; VI-NEXT: v_min_f16_e32 v0, v0, v2 +; VI-NEXT: v_or_b32_e32 v0, v0, v5 +; VI-NEXT: v_or_b32_e32 v1, v1, v4 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v4f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_min_f32_e32 v0, v0, v4 +; SI-NEXT: v_min_f32_e32 v1, v1, v5 +; SI-NEXT: v_min_f32_e32 v2, v2, v6 +; SI-NEXT: v_min_f32_e32 v3, v3, v7 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmin_legacy_ule_v4f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_min_f16 v0, v0, v2 +; GFX11-NEXT: v_pk_min_f16 v1, v1, v3 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ule <4 x half> %a, %b + %val = select nnan nsz <4 x i1> %cmp, <4 x half> %a, <4 x half> %b + ret <4 x half> %val +} + define <8 x half> @test_fmin_legacy_ule_v8f16(<8 x half> %a, <8 x half> %b) #0 { -; GFX9-SAFE-LABEL: test_fmin_legacy_ule_v8f16: -; GFX9-SAFE: ; %bb.0: -; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v14, 16, v7 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v15, 16, v3 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v12, 16, v6 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v13, 16, v2 -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v15, v14 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v10, 16, v5 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v11, 16, v1 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v13, v12 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v8, 16, v4 -; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v9, 16, v0 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v11, v10 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v9, v8 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v7 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v2, v6 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc -; GFX9-SAFE-NEXT: 
v_cmp_ngt_f16_e32 vcc, v1, v5 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc -; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v4 -; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc -; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100 -; GFX9-SAFE-NEXT: v_perm_b32 v0, v8, v0, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v1, v10, v1, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v2, v12, v2, s4 -; GFX9-SAFE-NEXT: v_perm_b32 v3, v14, v3, s4 -; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; GFX9-NNAN-LABEL: test_fmin_legacy_ule_v8f16: -; GFX9-NNAN: ; %bb.0: -; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX9-NNAN-NEXT: v_pk_min_f16 v0, v0, v4 -; GFX9-NNAN-NEXT: v_pk_min_f16 v1, v1, v5 -; GFX9-NNAN-NEXT: v_pk_min_f16 v2, v2, v6 -; GFX9-NNAN-NEXT: v_pk_min_f16 v3, v3, v7 -; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; VI-SAFE-LABEL: test_fmin_legacy_ule_v8f16: -; VI-SAFE: ; %bb.0: -; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v14, 16, v7 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v15, 16, v3 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v12, 16, v6 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v13, 16, v2 -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v15, v14 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v10, 16, v5 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v11, 16, v1 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v13, v12 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v8, 16, v4 -; VI-SAFE-NEXT: v_lshrrev_b32_e32 v9, 16, v0 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v11, v10 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v9, v8 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v7 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v2, v6 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v5 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc -; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v4 -; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v8 -; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v10 -; VI-SAFE-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v12 -; VI-SAFE-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v14 -; VI-SAFE-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD -; VI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; VI-NNAN-LABEL: test_fmin_legacy_ule_v8f16: -; VI-NNAN: ; %bb.0: -; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NNAN-NEXT: v_min_f16_sdwa v8, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_sdwa v9, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_sdwa v10, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_sdwa v11, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 -; VI-NNAN-NEXT: v_min_f16_e32 v3, v3, v7 -; VI-NNAN-NEXT: v_min_f16_e32 v2, v2, v6 -; VI-NNAN-NEXT: v_min_f16_e32 v1, v1, v5 -; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v4 -; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v11 -; 
VI-NNAN-NEXT: v_or_b32_e32 v1, v1, v10 -; VI-NNAN-NEXT: v_or_b32_e32 v2, v2, v9 -; VI-NNAN-NEXT: v_or_b32_e32 v3, v3, v8 -; VI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; SI-SAFE-LABEL: test_fmin_legacy_ule_v8f16: -; SI-SAFE: ; %bb.0: -; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v8, v8 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v8, v0 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v1, v9, v1 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v2, v10, v2 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v3, v11, v3 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v4, v12, v4 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v5, v13, v5 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v6, v14, v6 -; SI-SAFE-NEXT: v_min_legacy_f32_e32 v7, v15, v7 -; SI-SAFE-NEXT: s_setpc_b64 s[30:31] -; -; SI-NNAN-LABEL: test_fmin_legacy_ule_v8f16: -; SI-NNAN: ; %bb.0: -; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v15, v15 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v14, v14 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v13, v13 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v12, v12 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v11, v11 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v10, v10 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v9, v9 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v8, v8 -; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v15, v15 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v7, v7 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v14, v14 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v6, v6 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v13, v13 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v12, v12 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v11, v11 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v10, v10 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v9, v9 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1 -; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v8, v8 -; 
SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0 -; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v8 -; SI-NNAN-NEXT: v_min_f32_e32 v1, v1, v9 -; SI-NNAN-NEXT: v_min_f32_e32 v2, v2, v10 -; SI-NNAN-NEXT: v_min_f32_e32 v3, v3, v11 -; SI-NNAN-NEXT: v_min_f32_e32 v4, v4, v12 -; SI-NNAN-NEXT: v_min_f32_e32 v5, v5, v13 -; SI-NNAN-NEXT: v_min_f32_e32 v6, v6, v14 -; SI-NNAN-NEXT: v_min_f32_e32 v7, v7, v15 -; SI-NNAN-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_v8f16: -; GFX11-SAFE-TRUE16: ; %bb.0: -; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v4.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v1.h, v5.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v2.h, v6.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s2, v3.h, v7.h -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s3, v0.l, v4.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s4, v1.l, v5.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s5, v2.l, v6.l -; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s6, v3.l, v7.l -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v3.h, v7.h, v3.h, s2 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v2.h, v6.h, v2.h, s1 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.h, v5.h, v1.h, s0 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v4.h, v0.h, vcc_lo -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v4.l, v0.l, s3 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s4 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v2.l, v6.l, v2.l, s5 -; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v3.l, v7.l, v3.l, s6 -; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_v8f16: -; GFX11-SAFE-FAKE16: ; %bb.0: -; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v7 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v3 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v6 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v2 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v5 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1 -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v11, v10 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4 -; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v13, v12 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v13, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v15, v14 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v9, v8 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v2, v6 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v4 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v2, v11, v2, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v5 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v3, v7 -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v1, v12, v1, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc_lo -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v8, v0, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_delay_alu 
instid0(VALU_DEP_2) -; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v3, v10, v3, 0x5040100 -; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31] -; -; GFX11-NNAN-LABEL: test_fmin_legacy_ule_v8f16: -; GFX11-NNAN: ; %bb.0: -; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX11-NNAN-NEXT: v_pk_min_f16 v0, v0, v4 -; GFX11-NNAN-NEXT: v_pk_min_f16 v1, v1, v5 -; GFX11-NNAN-NEXT: v_pk_min_f16 v2, v2, v6 -; GFX11-NNAN-NEXT: v_pk_min_f16 v3, v3, v7 -; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31] +; GFX9-LABEL: test_fmin_legacy_ule_v8f16: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v7 +; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v3 +; GFX9-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; GFX9-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v15, v14 +; GFX9-NEXT: v_lshrrev_b32_e32 v10, 16, v5 +; GFX9-NEXT: v_lshrrev_b32_e32 v11, 16, v1 +; GFX9-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v13, v12 +; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v4 +; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v0 +; GFX9-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v11, v10 +; GFX9-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v9, v8 +; GFX9-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v7 +; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v2, v6 +; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v5 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc +; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v4 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc +; GFX9-NEXT: s_mov_b32 s4, 0x5040100 +; GFX9-NEXT: v_perm_b32 v0, v8, v0, s4 +; GFX9-NEXT: v_perm_b32 v1, v10, v1, s4 +; GFX9-NEXT: v_perm_b32 v2, v12, v2, s4 +; GFX9-NEXT: v_perm_b32 v3, v14, v3, s4 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v8f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v14, 16, v7 +; VI-NEXT: v_lshrrev_b32_e32 v15, 16, v3 +; VI-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; VI-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v15, v14 +; VI-NEXT: v_lshrrev_b32_e32 v10, 16, v5 +; VI-NEXT: v_lshrrev_b32_e32 v11, 16, v1 +; VI-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v13, v12 +; VI-NEXT: v_lshrrev_b32_e32 v8, 16, v4 +; VI-NEXT: v_lshrrev_b32_e32 v9, 16, v0 +; VI-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v11, v10 +; VI-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v9, v8 +; VI-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v7 +; VI-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v2, v6 +; VI-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v5 +; VI-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc +; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v4 +; VI-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v8 +; VI-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v10 +; VI-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v12 +; VI-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: 
v_lshlrev_b32_e32 v4, 16, v14 +; VI-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v8f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_min_legacy_f32_e32 v0, v8, v0 +; SI-NEXT: v_min_legacy_f32_e32 v1, v9, v1 +; SI-NEXT: v_min_legacy_f32_e32 v2, v10, v2 +; SI-NEXT: v_min_legacy_f32_e32 v3, v11, v3 +; SI-NEXT: v_min_legacy_f32_e32 v4, v12, v4 +; SI-NEXT: v_min_legacy_f32_e32 v5, v13, v5 +; SI-NEXT: v_min_legacy_f32_e32 v6, v14, v6 +; SI-NEXT: v_min_legacy_f32_e32 v7, v15, v7 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_v8f16: +; GFX11-TRUE16: ; %bb.0: +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v4.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v1.h, v5.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v2.h, v6.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s2, v3.h, v7.h +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s3, v0.l, v4.l +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s4, v1.l, v5.l +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s5, v2.l, v6.l +; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s6, v3.l, v7.l +; GFX11-TRUE16-NEXT: v_cndmask_b16 v3.h, v7.h, v3.h, s2 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.h, v6.h, v2.h, s1 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, v5.h, v1.h, s0 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v4.h, v0.h, vcc_lo +; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v4.l, v0.l, s3 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s4 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.l, v6.l, v2.l, s5 +; GFX11-TRUE16-NEXT: v_cndmask_b16 v3.l, v7.l, v3.l, s6 +; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_v8f16: +; GFX11-FAKE16: ; %bb.0: +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v7 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v3 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v6 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v2 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v5 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1 +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v11, v10 +; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4 +; GFX11-FAKE16-NEXT: 
v_lshrrev_b32_e32 v9, 16, v0 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v13, v12 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v13, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v15, v14 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v9, v8 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v2, v6 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v4 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_perm_b32 v2, v11, v2, 0x5040100 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v5 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo +; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v3, v7 +; GFX11-FAKE16-NEXT: v_perm_b32 v1, v12, v1, 0x5040100 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc_lo +; GFX11-FAKE16-NEXT: v_perm_b32 v0, v8, v0, 0x5040100 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) +; GFX11-FAKE16-NEXT: v_perm_b32 v3, v10, v3, 0x5040100 +; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31] %cmp = fcmp ule <8 x half> %a, %b %val = select <8 x i1> %cmp, <8 x half> %a, <8 x half> %b ret <8 x half> %val } +define <8 x half> @test_fmin_legacy_ule_v8f16_fast(<8 x half> %a, <8 x half> %b) #0 { +; GFX9-LABEL: test_fmin_legacy_ule_v8f16_fast: +; GFX9: ; %bb.0: +; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX9-NEXT: v_pk_min_f16 v0, v0, v4 +; GFX9-NEXT: v_pk_min_f16 v1, v1, v5 +; GFX9-NEXT: v_pk_min_f16 v2, v2, v6 +; GFX9-NEXT: v_pk_min_f16 v3, v3, v7 +; GFX9-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: test_fmin_legacy_ule_v8f16_fast: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_min_f16_sdwa v8, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_sdwa v9, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_sdwa v10, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_sdwa v11, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1 +; VI-NEXT: v_min_f16_e32 v3, v3, v7 +; VI-NEXT: v_min_f16_e32 v2, v2, v6 +; VI-NEXT: v_min_f16_e32 v1, v1, v5 +; VI-NEXT: v_min_f16_e32 v0, v0, v4 +; VI-NEXT: v_or_b32_e32 v0, v0, v11 +; VI-NEXT: v_or_b32_e32 v1, v1, v10 +; VI-NEXT: v_or_b32_e32 v2, v2, v9 +; VI-NEXT: v_or_b32_e32 v3, v3, v8 +; VI-NEXT: s_setpc_b64 s[30:31] +; +; SI-LABEL: test_fmin_legacy_ule_v8f16_fast: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_cvt_f16_f32_e32 v15, v15 +; SI-NEXT: v_cvt_f16_f32_e32 v7, v7 +; SI-NEXT: v_cvt_f16_f32_e32 v14, v14 +; SI-NEXT: v_cvt_f16_f32_e32 v6, v6 +; SI-NEXT: v_cvt_f16_f32_e32 v13, v13 +; SI-NEXT: v_cvt_f16_f32_e32 v5, v5 +; SI-NEXT: v_cvt_f16_f32_e32 v12, v12 +; SI-NEXT: v_cvt_f16_f32_e32 v4, v4 +; SI-NEXT: v_cvt_f16_f32_e32 v11, v11 +; SI-NEXT: v_cvt_f16_f32_e32 v3, v3 +; SI-NEXT: v_cvt_f16_f32_e32 v10, v10 +; SI-NEXT: v_cvt_f16_f32_e32 v2, v2 +; SI-NEXT: v_cvt_f16_f32_e32 v9, v9 +; SI-NEXT: v_cvt_f16_f32_e32 v1, v1 +; SI-NEXT: v_cvt_f16_f32_e32 v8, v8 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: v_cvt_f32_f16_e32 v15, v15 +; SI-NEXT: v_cvt_f32_f16_e32 v7, v7 +; SI-NEXT: v_cvt_f32_f16_e32 v14, v14 +; SI-NEXT: 
v_cvt_f32_f16_e32 v6, v6 +; SI-NEXT: v_cvt_f32_f16_e32 v13, v13 +; SI-NEXT: v_cvt_f32_f16_e32 v5, v5 +; SI-NEXT: v_cvt_f32_f16_e32 v12, v12 +; SI-NEXT: v_cvt_f32_f16_e32 v4, v4 +; SI-NEXT: v_cvt_f32_f16_e32 v11, v11 +; SI-NEXT: v_cvt_f32_f16_e32 v3, v3 +; SI-NEXT: v_cvt_f32_f16_e32 v10, v10 +; SI-NEXT: v_cvt_f32_f16_e32 v2, v2 +; SI-NEXT: v_cvt_f32_f16_e32 v9, v9 +; SI-NEXT: v_cvt_f32_f16_e32 v1, v1 +; SI-NEXT: v_cvt_f32_f16_e32 v8, v8 +; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 +; SI-NEXT: v_min_f32_e32 v0, v0, v8 +; SI-NEXT: v_min_f32_e32 v1, v1, v9 +; SI-NEXT: v_min_f32_e32 v2, v2, v10 +; SI-NEXT: v_min_f32_e32 v3, v3, v11 +; SI-NEXT: v_min_f32_e32 v4, v4, v12 +; SI-NEXT: v_min_f32_e32 v5, v5, v13 +; SI-NEXT: v_min_f32_e32 v6, v6, v14 +; SI-NEXT: v_min_f32_e32 v7, v7, v15 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; GFX11-LABEL: test_fmin_legacy_ule_v8f16_fast: +; GFX11: ; %bb.0: +; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX11-NEXT: v_pk_min_f16 v0, v0, v4 +; GFX11-NEXT: v_pk_min_f16 v1, v1, v5 +; GFX11-NEXT: v_pk_min_f16 v2, v2, v6 +; GFX11-NEXT: v_pk_min_f16 v3, v3, v7 +; GFX11-NEXT: s_setpc_b64 s[30:31] + %cmp = fcmp ule <8 x half> %a, %b + %val = select nnan nsz <8 x i1> %cmp, <8 x half> %a, <8 x half> %b + ret <8 x half> %val +} + attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll index ec4dd85..defcffa 100644 --- a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll +++ b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll @@ -1,8 +1,6 @@ -; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI-SAFE,GCN,FUNC %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI-NONAN,GCN-NONAN,GCN,FUNC %s +; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI,GCN,FUNC %s -; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-SAFE,GCN,FUNC %s -; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-NONAN,GCN-NONAN,GCN,FUNC %s +; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI,GCN,FUNC %s ; RUN: llc -mtriple=r600 -mcpu=redwood < %s | FileCheck -enable-var-scope --check-prefixes=EG,FUNC %s @@ -14,13 +12,9 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1 ; FUNC-LABEL: {{^}}s_test_fmin_legacy_subreg_inputs_f32: ; EG: MIN * -; SI-SAFE: v_min_legacy_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} +; SI: v_min_legacy_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} -; SI-NONAN: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} - -; VI-SAFE: v_cmp_nlt_f32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}} - -; VI-NONAN: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} +; VI: v_cmp_nlt_f32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}} define amdgpu_kernel void @s_test_fmin_legacy_subreg_inputs_f32(ptr addrspace(1) %out, <4 x float> %reg0) #0 { %r0 = extractelement <4 x float> %reg0, i32 0 %r1 = extractelement <4 x float> %reg0, i32 1 @@ -30,22 +24,32 @@ define amdgpu_kernel void @s_test_fmin_legacy_subreg_inputs_f32(ptr addrspace(1) ret void } -; FUNC-LABEL: {{^}}s_test_fmin_legacy_ule_f32: -; GCN-DAG: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}} +; FUNC-LABEL: {{^}}s_test_fmin_legacy_subreg_inputs_f32_fast: -; SI-SAFE: v_mov_b32_e32 [[VA:v[0-9]+]], s[[#LOAD + 2]] +; SI: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} -; GCN-NONAN: v_mov_b32_e32 
[[VB:v[0-9]+]], s[[#LOAD + 3]] +; VI: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}} +define amdgpu_kernel void @s_test_fmin_legacy_subreg_inputs_f32_fast(ptr addrspace(1) %out, <4 x float> %reg0) #0 { + %r0 = extractelement <4 x float> %reg0, i32 0 + %r1 = extractelement <4 x float> %reg0, i32 1 + %r2 = fcmp nnan nsz uge float %r0, %r1 + %r3 = select nnan nsz i1 %r2, float %r1, float %r0 + store float %r3, ptr addrspace(1) %out + ret void +} -; VI-SAFE: v_mov_b32_e32 [[VB:v[0-9]+]], s[[#LOAD + 3]] +; FUNC-LABEL: {{^}}s_test_fmin_legacy_ule_f32: +; GCN-DAG: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}} + +; SI: v_mov_b32_e32 [[VA:v[0-9]+]], s[[#LOAD + 2]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, s[[#LOAD + 3]], [[VA]] +; VI: v_mov_b32_e32 [[VB:v[0-9]+]], s[[#LOAD + 3]] -; VI-SAFE: v_mov_b32_e32 [[VA:v[0-9]+]], s[[#LOAD + 2]] -; VI-SAFE: v_cmp_ngt_f32_e32 vcc, s[[#LOAD + 2]], [[VB]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[VB]], [[VA]] +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, s[[#LOAD + 3]], [[VA]] -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, s[[#LOAD + 2]], [[VB]] +; VI: v_mov_b32_e32 [[VA:v[0-9]+]], s[[#LOAD + 2]] +; VI: v_cmp_ngt_f32_e32 vcc, s[[#LOAD + 2]], [[VB]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[VB]], [[VA]] define amdgpu_kernel void @s_test_fmin_legacy_ule_f32(ptr addrspace(1) %out, float %a, float %b) #0 { %cmp = fcmp ule float %a, %b %val = select i1 %cmp, float %a, float %b @@ -53,6 +57,19 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32(ptr addrspace(1) %out, flo ret void } +; FUNC-LABEL: {{^}}s_test_fmin_legacy_ule_f32_fast: +; GCN-DAG: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}} + +; GCN: v_mov_b32_e32 [[VB:v[0-9]+]], s[[#LOAD + 3]] + +; GCN: v_min_f32_e32 {{v[0-9]+}}, s[[#LOAD + 2]], [[VB]] +define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_fast(ptr addrspace(1) %out, float %a, float %b) #0 { + %cmp = fcmp ule float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + ; Nsz also needed ; FIXME: Should separate tests ; GCN-LABEL: {{^}}s_test_fmin_legacy_ule_f32_nnan_src: @@ -61,12 +78,10 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32(ptr addrspace(1) %out, flo ; GCN-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0 ; GCN-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0 -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]] - -; VI-SAFE: v_cmp_ngt_f32_e32 vcc, [[ADD_A]], [[ADD_B]] -; VI-SAFE: v_cndmask_b32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]], vcc +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]] -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]] +; VI: v_cmp_ngt_f32_e32 vcc, [[ADD_A]], [[ADD_B]] +; VI: v_cndmask_b32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]], vcc define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src(ptr addrspace(1) %out, float %a, float %b) #0 { %a.nnan = fadd nnan float %a, 1.0 %b.nnan = fadd nnan float %b, 2.0 @@ -76,16 +91,32 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src(ptr addrspace(1) ret void } +; Nsz also needed +; FIXME: Should separate tests +; GCN-LABEL: {{^}}s_test_fmin_legacy_ule_f32_nnan_src_fast: +; GCN: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}} + +; GCN-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0 +; GCN-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0 + +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]] +define amdgpu_kernel void 
@s_test_fmin_legacy_ule_f32_nnan_src_fast(ptr addrspace(1) %out, float %a, float %b) #0 { + %a.nnan = fadd nnan float %a, 1.0 + %b.nnan = fadd nnan float %b, 2.0 + %cmp = fcmp ule float %a.nnan, %b.nnan + %val = select nnan nsz i1 %cmp, float %a.nnan, float %b.nnan + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + ; FUNC-LABEL: {{^}}test_fmin_legacy_ule_f32: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] -; VI-SAFE: v_cmp_ngt_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] - -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; VI: v_cmp_ngt_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] define amdgpu_kernel void @test_fmin_legacy_ule_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid @@ -100,16 +131,33 @@ define amdgpu_kernel void @test_fmin_legacy_ule_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmin_legacy_ole_f32: +; FUNC-LABEL: {{^}}test_fmin_legacy_ule_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +define amdgpu_kernel void @test_fmin_legacy_ule_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 -; VI-SAFE: v_cmp_le_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] + %cmp = fcmp ule float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; FUNC-LABEL: {{^}}test_fmin_legacy_ole_f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] + +; VI: v_cmp_le_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] define amdgpu_kernel void @test_fmin_legacy_ole_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid @@ -124,16 +172,33 @@ define amdgpu_kernel void @test_fmin_legacy_ole_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmin_legacy_olt_f32: +; FUNC-LABEL: {{^}}test_fmin_legacy_ole_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +define amdgpu_kernel void @test_fmin_legacy_ole_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 -; VI-SAFE: v_cmp_lt_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = 
load volatile float, ptr addrspace(1) %gep.1, align 4 + + %cmp = fcmp ole float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + +; FUNC-LABEL: {{^}}test_fmin_legacy_olt_f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]] -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +; VI: v_cmp_lt_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] define amdgpu_kernel void @test_fmin_legacy_olt_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid @@ -148,16 +213,33 @@ define amdgpu_kernel void @test_fmin_legacy_olt_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmin_legacy_ult_f32: +; FUNC-LABEL: {{^}}test_fmin_legacy_olt_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +define amdgpu_kernel void @test_fmin_legacy_olt_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 -; VI-SAFE: v_cmp_nge_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] + %cmp = fcmp olt float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] + +; VI: v_cmp_nge_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] define amdgpu_kernel void @test_fmin_legacy_ult_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid @@ -172,16 +254,33 @@ define amdgpu_kernel void @test_fmin_legacy_ult_f32(ptr addrspace(1) %out, ptr a ret void } -; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v1f32: +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_f32_fast: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] -; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +define amdgpu_kernel void @test_fmin_legacy_ult_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1 -; VI-SAFE: v_cmp_nge_f32_e32 vcc, [[A]], [[B]] -; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] + %a = load volatile float, ptr addrspace(1) %gep.0, align 4 + %b = load volatile float, ptr addrspace(1) %gep.1, align 4 -; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] + %cmp = fcmp ult float %a, %b + %val = select nnan nsz i1 %cmp, float %a, float %b + store float %val, ptr addrspace(1) %out, align 4 + ret void +} + +; FUNC-LABEL: 
{{^}}test_fmin_legacy_ult_v1f32: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]] + +; VI: v_cmp_nge_f32_e32 vcc, [[A]], [[B]] +; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]] define amdgpu_kernel void @test_fmin_legacy_ult_v1f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr <1 x float>, ptr addrspace(1) %in, i32 %tid @@ -196,19 +295,35 @@ define amdgpu_kernel void @test_fmin_legacy_ult_v1f32(ptr addrspace(1) %out, ptr ret void } +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v1f32_fast: +; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] +; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] + +; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]] +define amdgpu_kernel void @test_fmin_legacy_ult_v1f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr <1 x float>, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr <1 x float>, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile <1 x float>, ptr addrspace(1) %gep.0 + %b = load volatile <1 x float>, ptr addrspace(1) %gep.1 + + %cmp = fcmp ult <1 x float> %a, %b + %val = select nnan nsz <1 x i1> %cmp, <1 x float> %a, <1 x float> %b + store <1 x float> %val, ptr addrspace(1) %out + ret void +} + ; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v2f32: ; GCN: {{buffer|flat}}_load_dwordx2 ; GCN: {{buffer|flat}}_load_dwordx2 -; SI-SAFE: v_min_legacy_f32_e32 -; SI-SAFE: v_min_legacy_f32_e32 - -; VI-SAFE: v_cmp_nge_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE: v_cmp_nge_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 +; SI: v_min_legacy_f32_e32 +; SI: v_min_legacy_f32_e32 -; GCN-NONAN: v_min_f32_e32 -; GCN-NONAN: v_min_f32_e32 +; VI: v_cmp_nge_f32_e32 +; VI: v_cndmask_b32_e32 +; VI: v_cmp_nge_f32_e32 +; VI: v_cndmask_b32_e32 define amdgpu_kernel void @test_fmin_legacy_ult_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr <2 x float>, ptr addrspace(1) %in, i32 %tid @@ -223,25 +338,40 @@ define amdgpu_kernel void @test_fmin_legacy_ult_v2f32(ptr addrspace(1) %out, ptr ret void } +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v2f32_fast: +; GCN: {{buffer|flat}}_load_dwordx2 +; GCN: {{buffer|flat}}_load_dwordx2 + +; GCN: v_min_f32_e32 +; GCN: v_min_f32_e32 +define amdgpu_kernel void @test_fmin_legacy_ult_v2f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr <2 x float>, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr <2 x float>, ptr addrspace(1) %gep.0, i32 1 + + %a = load volatile <2 x float>, ptr addrspace(1) %gep.0 + %b = load volatile <2 x float>, ptr addrspace(1) %gep.1 + + %cmp = fcmp ult <2 x float> %a, %b + %val = select nnan nsz <2 x i1> %cmp, <2 x float> %a, <2 x float> %b + store <2 x float> %val, ptr addrspace(1) %out + ret void +} + ; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v3f32: -; SI-SAFE: v_min_legacy_f32_e32 -; SI-SAFE: v_min_legacy_f32_e32 -; SI-SAFE: v_min_legacy_f32_e32 -; SI-SAFE-NOT: v_min_ - -; VI-SAFE: v_cmp_nge_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE: v_cmp_nge_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 -; VI-SAFE: v_cmp_nge_f32_e32 -; VI-SAFE: v_cndmask_b32_e32 +; SI: v_min_legacy_f32_e32 +; SI: v_min_legacy_f32_e32 +; SI: v_min_legacy_f32_e32 +; SI-NOT: v_min_ + +; VI: v_cmp_nge_f32_e32 +; VI: v_cndmask_b32_e32 +; VI: v_cmp_nge_f32_e32 +; 
VI: v_cndmask_b32_e32 +; VI: v_cmp_nge_f32_e32 +; VI: v_cndmask_b32_e32 ; VI-NOT: v_cmp ; VI-NOT: v_cndmask - -; GCN-NONAN: v_min_f32_e32 -; GCN-NONAN: v_min_f32_e32 -; GCN-NONAN: v_min_f32_e32 -; GCN-NONAN-NOT: v_min_ define amdgpu_kernel void @test_fmin_legacy_ult_v3f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 %gep.0 = getelementptr <3 x float>, ptr addrspace(1) %in, i32 %tid @@ -256,6 +386,28 @@ define amdgpu_kernel void @test_fmin_legacy_ult_v3f32(ptr addrspace(1) %out, ptr ret void } +; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v3f32_fast: +; VI-NOT: v_cmp +; VI-NOT: v_cndmask + +; GCN: v_min_f32_e32 +; GCN: v_min_f32_e32 +; GCN: v_min_f32_e32 +; GCN-NOT: v_min_ +define amdgpu_kernel void @test_fmin_legacy_ult_v3f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 { + %tid = call i32 @llvm.amdgcn.workitem.id.x() #1 + %gep.0 = getelementptr <3 x float>, ptr addrspace(1) %in, i32 %tid + %gep.1 = getelementptr <3 x float>, ptr addrspace(1) %gep.0, i32 1 + + %a = load <3 x float>, ptr addrspace(1) %gep.0 + %b = load <3 x float>, ptr addrspace(1) %gep.1 + + %cmp = fcmp ult <3 x float> %a, %b + %val = select nnan nsz <3 x i1> %cmp, <3 x float> %a, <3 x float> %b + store <3 x float> %val, ptr addrspace(1) %out + ret void +} + ; FUNC-LABEL: {{^}}test_fmin_legacy_ole_f32_multi_use: ; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]] ; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]] diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll new file mode 100644 index 0000000..e1784f8 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll @@ -0,0 +1,197 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s +; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s + +;; xvinsve0.w +define void @xvinsve0_v8i32_l_0(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8i32_l_0: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 0 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i32>, ptr %a + %vb = load <8 x i32>, ptr %b + %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> <i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + store <8 x i32> %vc, ptr %d + ret void +} + +define void @xvinsve0_v8i32_l_4(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8i32_l_4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 4 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i32>, ptr %a + %vb = load <8 x i32>, ptr %b + %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 5, i32 6, i32 7> + store <8 x i32> %vc, ptr %d + ret void +} + +define void @xvinsve0_v8f32_l(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8f32_l: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 0 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x float>, ptr %a + %vb = load <8 x float>, ptr %b + %vc = shufflevector <8 x float> %va, <8 x float> %vb, <8 x i32> <i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> 
+ store <8 x float> %vc, ptr %d + ret void +} + +define void @xvinsve0_v8i32_h_1(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8i32_h_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 1 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i32>, ptr %a + %vb = load <8 x i32>, ptr %b + %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> <i32 8, i32 0, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + store <8 x i32> %vc, ptr %d + ret void +} + +define void @xvinsve0_v8i32_h_6(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8i32_h_6: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 6 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x i32>, ptr %a + %vb = load <8 x i32>, ptr %b + %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 0, i32 15> + store <8 x i32> %vc, ptr %d + ret void +} + +define void @xvinsve0_v8f32_h(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v8f32_h: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 0 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <8 x float>, ptr %a + %vb = load <8 x float>, ptr %b + %vc = shufflevector <8 x float> %va, <8 x float> %vb, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> + store <8 x float> %vc, ptr %d + ret void +} + +;; xvinsve0.d +define void @xvinsve0_v4i64_l_1(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4i64_l_1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 1 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i64>, ptr %a + %vb = load <4 x i64>, ptr %b + %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> <i32 0, i32 4, i32 2, i32 3> + store <4 x i64> %vc, ptr %d + ret void +} + +define void @xvinsve0_v4i64_l_2(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4i64_l_2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 2 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i64>, ptr %a + %vb = load <4 x i64>, ptr %b + %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> <i32 0, i32 1, i32 4, i32 3> + store <4 x i64> %vc, ptr %d + ret void +} + +define void @xvinsve0_v4f64_l(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4f64_l: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 0 +; CHECK-NEXT: xvst $xr0, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x double>, ptr %a + %vb = load <4 x double>, ptr %b + %vc = shufflevector <4 x double> %va, <4 x double> %vb, <4 x i32> <i32 4, i32 1, i32 2, i32 3> + store <4 x double> %vc, ptr %d + ret void +} + +define void @xvinsve0_v4i64_h_0(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4i64_h_0: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 0 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i64>, ptr %a + %vb = load <4 x i64>, ptr %b + %vc = shufflevector <4 x i64> 
%va, <4 x i64> %vb, <4 x i32> <i32 0, i32 5, i32 6, i32 7> + store <4 x i64> %vc, ptr %d + ret void +} + +define void @xvinsve0_v4i64_h_2(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4i64_h_2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 2 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x i64>, ptr %a + %vb = load <4 x i64>, ptr %b + %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> <i32 4, i32 5, i32 0, i32 7> + store <4 x i64> %vc, ptr %d + ret void +} + +define void @xvinsve0_v4f64_h(ptr %d, ptr %a, ptr %b) nounwind { +; CHECK-LABEL: xvinsve0_v4f64_h: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvld $xr0, $a1, 0 +; CHECK-NEXT: xvld $xr1, $a2, 0 +; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 0 +; CHECK-NEXT: xvst $xr1, $a0, 0 +; CHECK-NEXT: ret +entry: + %va = load <4 x double>, ptr %a + %vb = load <4 x double>, ptr %b + %vc = shufflevector <4 x double> %va, <4 x double> %vb, <4 x i32> <i32 0, i32 5, i32 6, i32 7> + store <4 x double> %vc, ptr %d + ret void +} diff --git a/llvm/test/CodeGen/NVPTX/bug22322.ll b/llvm/test/CodeGen/NVPTX/bug22322.ll index 055c512..71e180b 100644 --- a/llvm/test/CodeGen/NVPTX/bug22322.ll +++ b/llvm/test/CodeGen/NVPTX/bug22322.ll @@ -20,12 +20,12 @@ _ZL11compute_vecRK6float3jb.exit: call void @llvm.lifetime.start.p0(i64 4, ptr %ret_vec.sroa.8.i) %6 = and i32 %4, 15 %7 = icmp eq i32 %6, 0 - %8 = select i1 %7, float 0.000000e+00, float -1.000000e+00 + %8 = select nnan nsz i1 %7, float 0.000000e+00, float -1.000000e+00 store float %8, ptr %ret_vec.sroa.8.i, align 4 ; CHECK: max.f32 %r{{[0-9]+}}, %r{{[0-9]+}}, 0f00000000 %9 = fcmp olt float %8, 0.000000e+00 %ret_vec.sroa.8.i.val = load float, ptr %ret_vec.sroa.8.i, align 4 - %10 = select i1 %9, float 0.000000e+00, float %ret_vec.sroa.8.i.val + %10 = select nnan nsz i1 %9, float 0.000000e+00, float %ret_vec.sroa.8.i.val call void @llvm.lifetime.end.p0(i64 4, ptr %ret_vec.sroa.8.i) %11 = getelementptr inbounds %class.float3, ptr %dst, i64 %5, i32 0 store float 0.000000e+00, ptr %11, align 4 @@ -51,7 +51,7 @@ declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #2 ; Function Attrs: nounwind declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #2 -attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "no-signed-zeros-fp-math"="true" "use-soft-float"="false" } +attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "use-soft-float"="false" } attributes #1 = { nounwind readnone } attributes #2 = { nounwind } diff --git a/llvm/test/CodeGen/PowerPC/scalar-min-max.ll b/llvm/test/CodeGen/PowerPC/scalar-min-max.ll index 216d498..5f637e3 100644 --- a/llvm/test/CodeGen/PowerPC/scalar-min-max.ll +++ b/llvm/test/CodeGen/PowerPC/scalar-min-max.ll @@ -1,36 +1,23 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names --enable-unsafe-fp-math \ -; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \ -; RUN: --enable-no-nans-fp-math \ -; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s -; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names --enable-unsafe-fp-math \ -; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \ -; RUN: --enable-no-nans-fp-math \ -; RUN: 
-mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s ; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names -verify-machineinstrs \ ; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \ -; RUN: --check-prefix=NO-FAST-P9 +; RUN: --check-prefix=P9 ; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names -verify-machineinstrs \ ; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \ -; RUN: --check-prefix=NO-FAST-P8 +; RUN: --check-prefix=P8 define dso_local float @testfmax(float %a, float %b) local_unnamed_addr { -; CHECK-LABEL: testfmax: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmaxdp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testfmax: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmaxcdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testfmax: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 -; NO-FAST-P8-NEXT: bgtlr cr0 -; NO-FAST-P8-NEXT: # %bb.1: # %entry -; NO-FAST-P8-NEXT: fmr f1, f2 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testfmax: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmaxcdp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testfmax: +; P8: # %bb.0: # %entry +; P8-NEXT: fcmpu cr0, f1, f2 +; P8-NEXT: bgtlr cr0 +; P8-NEXT: # %bb.1: # %entry +; P8-NEXT: fmr f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp ogt float %a, %b %cond = select i1 %cmp, float %a, float %b @@ -38,23 +25,18 @@ entry: } define dso_local double @testdmax(double %a, double %b) local_unnamed_addr { -; CHECK-LABEL: testdmax: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmaxdp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testdmax: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmaxcdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testdmax: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P8-NEXT: bgtlr cr0 -; NO-FAST-P8-NEXT: # %bb.1: # %entry -; NO-FAST-P8-NEXT: fmr f1, f2 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testdmax: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmaxcdp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testdmax: +; P8: # %bb.0: # %entry +; P8-NEXT: xscmpudp cr0, f1, f2 +; P8-NEXT: bgtlr cr0 +; P8-NEXT: # %bb.1: # %entry +; P8-NEXT: fmr f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp ogt double %a, %b %cond = select i1 %cmp, double %a, double %b @@ -62,23 +44,18 @@ entry: } define dso_local float @testfmin(float %a, float %b) local_unnamed_addr { -; CHECK-LABEL: testfmin: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmindp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testfmin: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmincdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testfmin: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2 -; NO-FAST-P8-NEXT: bltlr cr0 -; NO-FAST-P8-NEXT: # %bb.1: # %entry -; NO-FAST-P8-NEXT: fmr f1, f2 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testfmin: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmincdp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testfmin: +; P8: # %bb.0: # %entry +; P8-NEXT: fcmpu cr0, f1, f2 +; P8-NEXT: bltlr cr0 +; P8-NEXT: # %bb.1: # %entry +; P8-NEXT: fmr f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp olt float %a, %b %cond = select i1 %cmp, float %a, float %b @@ -86,23 +63,18 @@ entry: } define dso_local double @testdmin(double %a, double %b) local_unnamed_addr { -; CHECK-LABEL: testdmin: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmindp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testdmin: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmincdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: 
testdmin: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2 -; NO-FAST-P8-NEXT: bltlr cr0 -; NO-FAST-P8-NEXT: # %bb.1: # %entry -; NO-FAST-P8-NEXT: fmr f1, f2 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testdmin: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmincdp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testdmin: +; P8: # %bb.0: # %entry +; P8-NEXT: xscmpudp cr0, f1, f2 +; P8-NEXT: bltlr cr0 +; P8-NEXT: # %bb.1: # %entry +; P8-NEXT: fmr f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp olt double %a, %b %cond = select i1 %cmp, double %a, double %b @@ -110,86 +82,62 @@ entry: } define dso_local float @testfmax_fast(float %a, float %b) local_unnamed_addr { -; CHECK-LABEL: testfmax_fast: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmaxdp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testfmax_fast: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmaxcdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testfmax_fast: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: xssubsp f0, f2, f1 -; NO-FAST-P8-NEXT: fsel f1, f0, f2, f1 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testfmax_fast: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmaxdp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testfmax_fast: +; P8: # %bb.0: # %entry +; P8-NEXT: xsmaxdp f1, f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp nnan ninf ogt float %a, %b - %cond = select i1 %cmp, float %a, float %b + %cond = select nnan nsz i1 %cmp, float %a, float %b ret float %cond } define dso_local double @testdmax_fast(double %a, double %b) local_unnamed_addr { -; CHECK-LABEL: testdmax_fast: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmaxdp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testdmax_fast: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmaxcdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testdmax_fast: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: xssubdp f0, f2, f1 -; NO-FAST-P8-NEXT: fsel f1, f0, f2, f1 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testdmax_fast: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmaxdp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testdmax_fast: +; P8: # %bb.0: # %entry +; P8-NEXT: xsmaxdp f1, f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp nnan ninf ogt double %a, %b - %cond = select i1 %cmp, double %a, double %b + %cond = select nnan nsz i1 %cmp, double %a, double %b ret double %cond } define dso_local float @testfmin_fast(float %a, float %b) local_unnamed_addr { -; CHECK-LABEL: testfmin_fast: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmindp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testfmin_fast: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmincdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; NO-FAST-P8-LABEL: testfmin_fast: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: xssubsp f0, f1, f2 -; NO-FAST-P8-NEXT: fsel f1, f0, f2, f1 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testfmin_fast: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmindp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testfmin_fast: +; P8: # %bb.0: # %entry +; P8-NEXT: xsmindp f1, f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp nnan ninf olt float %a, %b - %cond = select i1 %cmp, float %a, float %b + %cond = select nnan nsz i1 %cmp, float %a, float %b ret float %cond } define dso_local double @testdmin_fast(double %a, double %b) local_unnamed_addr { -; CHECK-LABEL: testdmin_fast: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xsmindp f1, f1, f2 -; CHECK-NEXT: blr -; -; NO-FAST-P9-LABEL: testdmin_fast: -; NO-FAST-P9: # %bb.0: # %entry -; NO-FAST-P9-NEXT: xsmincdp f1, f1, f2 -; NO-FAST-P9-NEXT: blr -; -; 
NO-FAST-P8-LABEL: testdmin_fast: -; NO-FAST-P8: # %bb.0: # %entry -; NO-FAST-P8-NEXT: xssubdp f0, f1, f2 -; NO-FAST-P8-NEXT: fsel f1, f0, f2, f1 -; NO-FAST-P8-NEXT: blr +; P9-LABEL: testdmin_fast: +; P9: # %bb.0: # %entry +; P9-NEXT: xsmindp f1, f1, f2 +; P9-NEXT: blr +; +; P8-LABEL: testdmin_fast: +; P8: # %bb.0: # %entry +; P8-NEXT: xsmindp f1, f1, f2 +; P8-NEXT: blr entry: %cmp = fcmp nnan ninf olt double %a, %b - %cond = select i1 %cmp, double %a, double %b + %cond = select nnan nsz i1 %cmp, double %a, double %b ret double %cond } diff --git a/llvm/test/CodeGen/RISCV/select-bare.ll b/llvm/test/CodeGen/RISCV/select-bare.ll index 796121a..44028a7 100644 --- a/llvm/test/CodeGen/RISCV/select-bare.ll +++ b/llvm/test/CodeGen/RISCV/select-bare.ll @@ -26,8 +26,8 @@ define i32 @bare_select(i1 %a, i32 %b, i32 %c) nounwind { ; RV32IXQCI-LABEL: bare_select: ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret %1 = select i1 %a, i32 %b, i32 %c ret i32 %1 @@ -53,8 +53,8 @@ define float @bare_select_float(i1 %a, float %b, float %c) nounwind { ; RV32IXQCI-LABEL: bare_select_float: ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret %1 = select i1 %a, float %b, float %c ret float %1 diff --git a/llvm/test/CodeGen/RISCV/select-cc.ll b/llvm/test/CodeGen/RISCV/select-cc.ll index 14055df..b57f625 100644 --- a/llvm/test/CodeGen/RISCV/select-cc.ll +++ b/llvm/test/CodeGen/RISCV/select-cc.ll @@ -87,40 +87,40 @@ define signext i32 @foo(i32 signext %a, ptr %b) nounwind { ; ; RV32IXQCI-LABEL: foo: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: lw a5, 0(a1) ; RV32IXQCI-NEXT: lw a2, 0(a1) ; RV32IXQCI-NEXT: lw a4, 0(a1) ; RV32IXQCI-NEXT: lw t5, 0(a1) ; RV32IXQCI-NEXT: lw t4, 0(a1) +; RV32IXQCI-NEXT: lw t3, 0(a1) ; RV32IXQCI-NEXT: lw t2, 0(a1) -; RV32IXQCI-NEXT: lw t1, 0(a1) ; RV32IXQCI-NEXT: lw t0, 0(a1) ; RV32IXQCI-NEXT: lw a7, 0(a1) ; RV32IXQCI-NEXT: lw a6, 0(a1) -; RV32IXQCI-NEXT: lw t3, 0(a1) ; RV32IXQCI-NEXT: lw a3, 0(a1) -; RV32IXQCI-NEXT: bltz t3, .LBB0_2 +; RV32IXQCI-NEXT: lw t1, 0(a1) +; RV32IXQCI-NEXT: lw a5, 0(a1) +; RV32IXQCI-NEXT: bltz t1, .LBB0_2 ; RV32IXQCI-NEXT: # %bb.1: -; RV32IXQCI-NEXT: li t6, 0 -; RV32IXQCI-NEXT: qc.mveq a5, a0, a5, a0 -; RV32IXQCI-NEXT: qc.mvne a2, a5, a2, a5 -; RV32IXQCI-NEXT: qc.mvltu a4, a4, a2, a2 -; RV32IXQCI-NEXT: qc.mvgeu t5, a4, t5, a4 -; RV32IXQCI-NEXT: qc.mvltu t4, t5, t4, t5 -; RV32IXQCI-NEXT: qc.mvgeu t2, t2, t4, t4 -; RV32IXQCI-NEXT: qc.mvlt t1, t1, t2, t2 -; RV32IXQCI-NEXT: qc.mvge t0, t1, t0, t1 -; RV32IXQCI-NEXT: qc.mvlt a7, t0, a7, t0 -; RV32IXQCI-NEXT: qc.mvge a6, a6, a7, a7 -; RV32IXQCI-NEXT: mv a3, t3 -; RV32IXQCI-NEXT: qc.mvge a3, t6, t3, a6 +; RV32IXQCI-NEXT: li a5, 0 +; RV32IXQCI-NEXT: qc.mveq a2, a0, a2, a0 +; RV32IXQCI-NEXT: qc.mvne a4, a2, a4, a2 +; RV32IXQCI-NEXT: qc.mvltu t5, t5, a4, a4 +; RV32IXQCI-NEXT: qc.mvgeu t4, t5, t4, t5 +; RV32IXQCI-NEXT: qc.mvltu t3, t4, t3, t4 +; RV32IXQCI-NEXT: qc.mvgeu t2, t2, t3, t3 +; RV32IXQCI-NEXT: qc.mvlt t0, t0, t2, t2 +; RV32IXQCI-NEXT: qc.mvge a7, t0, a7, t0 +; RV32IXQCI-NEXT: qc.mvlt a6, a7, a6, a7 +; RV32IXQCI-NEXT: qc.mvge a3, a3, a6, a6 +; RV32IXQCI-NEXT: qc.mvlt a3, a5, t1, t1 +; RV32IXQCI-NEXT: mv a5, a3 ; RV32IXQCI-NEXT: .LBB0_2: ; RV32IXQCI-NEXT: lw a2, 0(a1) ; RV32IXQCI-NEXT: lw a0, 
0(a1) ; RV32IXQCI-NEXT: li a1, 1024 -; RV32IXQCI-NEXT: qc.mvlt a2, a1, a2, a3 +; RV32IXQCI-NEXT: qc.mvlt a2, a1, a2, a5 ; RV32IXQCI-NEXT: li a1, 2046 -; RV32IXQCI-NEXT: qc.mvltu a0, a1, t3, a2 +; RV32IXQCI-NEXT: qc.mvltu a0, a1, t1, a2 ; RV32IXQCI-NEXT: ret ; ; RV64I-LABEL: foo: @@ -417,8 +417,8 @@ define i32 @select_sge_int16min(i32 signext %x, i32 signext %y, i32 signext %z) ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: lui a3, 1048560 ; RV32IXQCI-NEXT: addi a3, a3, -1 -; RV32IXQCI-NEXT: qc.mvlt a2, a3, a0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mvge a1, a3, a0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64I-LABEL: select_sge_int16min: @@ -471,10 +471,10 @@ define i64 @select_sge_int32min(i64 %x, i64 %y, i64 %z) { ; RV32IXQCI-NEXT: srli a0, a1, 31 ; RV32IXQCI-NEXT: xori a0, a0, 1 ; RV32IXQCI-NEXT: qc.mveqi a0, a1, -1, a6 -; RV32IXQCI-NEXT: qc.mvnei a4, a0, 0, a2 -; RV32IXQCI-NEXT: qc.mvnei a5, a0, 0, a3 -; RV32IXQCI-NEXT: mv a0, a4 -; RV32IXQCI-NEXT: mv a1, a5 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 0, a4 +; RV32IXQCI-NEXT: qc.mveqi a3, a0, 0, a5 +; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: mv a1, a3 ; RV32IXQCI-NEXT: ret ; ; RV64I-LABEL: select_sge_int32min: diff --git a/llvm/test/CodeGen/RISCV/select-cond.ll b/llvm/test/CodeGen/RISCV/select-cond.ll index b88fe9a..3ca0f46 100644 --- a/llvm/test/CodeGen/RISCV/select-cond.ll +++ b/llvm/test/CodeGen/RISCV/select-cond.ll @@ -35,8 +35,8 @@ define signext i32 @select_i32_trunc(i32 signext %cond, i32 signext %x, i32 sign ; RV32-XQCICM-LABEL: select_i32_trunc: ; RV32-XQCICM: # %bb.0: ; RV32-XQCICM-NEXT: andi a0, a0, 1 -; RV32-XQCICM-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32-XQCICM-NEXT: mv a0, a2 +; RV32-XQCICM-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32-XQCICM-NEXT: mv a0, a1 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_trunc: @@ -48,8 +48,8 @@ define signext i32 @select_i32_trunc(i32 signext %cond, i32 signext %x, i32 sign ; RV32IXQCI-LABEL: select_i32_trunc: ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_trunc: @@ -93,8 +93,8 @@ define signext i32 @select_i32_param(i1 signext %cond, i32 signext %x, i32 signe ; RV32-XQCICM-LABEL: select_i32_param: ; RV32-XQCICM: # %bb.0: ; RV32-XQCICM-NEXT: andi a0, a0, 1 -; RV32-XQCICM-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32-XQCICM-NEXT: mv a0, a2 +; RV32-XQCICM-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32-XQCICM-NEXT: mv a0, a1 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_param: @@ -106,8 +106,8 @@ define signext i32 @select_i32_param(i1 signext %cond, i32 signext %x, i32 signe ; RV32IXQCI-LABEL: select_i32_param: ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_param: @@ -148,8 +148,8 @@ define signext i32 @select_i32_eq(i32 signext %a, i32 signext %b, i32 signext %x ; ; RV32-XQCICM-LABEL: select_i32_eq: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mveq a3, a0, a1, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvne a2, a0, a1, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_eq: @@ -163,8 +163,8 @@ define signext i32 @select_i32_eq(i32 signext %a, i32 signext %b, i32 signext %x ; ; RV32IXQCI-LABEL: select_i32_eq: ; RV32IXQCI: # 
%bb.0: -; RV32IXQCI-NEXT: qc.mveq a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvne a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_eq: @@ -205,8 +205,8 @@ define signext i32 @select_i32_ne(i32 signext %a, i32 signext %b, i32 signext %x ; ; RV32-XQCICM-LABEL: select_i32_ne: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvne a3, a0, a1, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mveq a2, a0, a1, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_ne: @@ -220,8 +220,8 @@ define signext i32 @select_i32_ne(i32 signext %a, i32 signext %b, i32 signext %x ; ; RV32IXQCI-LABEL: select_i32_ne: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvne a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mveq a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_ne: @@ -262,8 +262,8 @@ define signext i32 @select_i32_ugt(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_ugt: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvltu a3, a1, a0, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvgeu a2, a1, a0, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_ugt: @@ -277,8 +277,8 @@ define signext i32 @select_i32_ugt(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_ugt: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvltu a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeu a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_ugt: @@ -319,8 +319,8 @@ define signext i32 @select_i32_uge(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_uge: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvgeu a3, a0, a1, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvltu a2, a0, a1, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_uge: @@ -334,8 +334,8 @@ define signext i32 @select_i32_uge(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_uge: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvgeu a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltu a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_uge: @@ -376,8 +376,8 @@ define signext i32 @select_i32_ult(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_ult: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvltu a3, a0, a1, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvgeu a2, a0, a1, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_ult: @@ -391,8 +391,8 @@ define signext i32 @select_i32_ult(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_ult: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvltu a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeu a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_ult: @@ -433,8 +433,8 @@ define signext i32 @select_i32_ule(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_ule: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvgeu a3, a1, a0, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvltu a2, a1, a0, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_ule: @@ -448,8 +448,8 @@ define 
signext i32 @select_i32_ule(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_ule: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvgeu a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltu a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_ule: @@ -490,8 +490,8 @@ define signext i32 @select_i32_sgt(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_sgt: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvlt a3, a1, a0, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvge a2, a1, a0, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_sgt: @@ -505,8 +505,8 @@ define signext i32 @select_i32_sgt(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_sgt: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvlt a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvge a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_sgt: @@ -547,8 +547,8 @@ define signext i32 @select_i32_sge(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_sge: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvge a3, a0, a1, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvlt a2, a0, a1, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_sge: @@ -562,8 +562,8 @@ define signext i32 @select_i32_sge(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_sge: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvge a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlt a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_sge: @@ -604,8 +604,8 @@ define signext i32 @select_i32_slt(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_slt: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvlt a3, a0, a1, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvge a2, a0, a1, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_slt: @@ -619,8 +619,8 @@ define signext i32 @select_i32_slt(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_slt: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvlt a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvge a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_slt: @@ -661,8 +661,8 @@ define signext i32 @select_i32_sle(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32-XQCICM-LABEL: select_i32_sle: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: qc.mvge a3, a1, a0, a2 -; RV32-XQCICM-NEXT: mv a0, a3 +; RV32-XQCICM-NEXT: qc.mvlt a2, a1, a0, a3 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i32_sle: @@ -676,8 +676,8 @@ define signext i32 @select_i32_sle(i32 signext %a, i32 signext %b, i32 signext % ; ; RV32IXQCI-LABEL: select_i32_sle: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: qc.mvge a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlt a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i32_sle: @@ -723,11 +723,11 @@ define i64 @select_i64_trunc(i64 %cond, i64 %x, i64 %y) nounwind { ; ; RV32-XQCICM-LABEL: select_i64_trunc: ; RV32-XQCICM: # %bb.0: -; RV32-XQCICM-NEXT: mv a1, a5 +; RV32-XQCICM-NEXT: mv a1, a3 ; RV32-XQCICM-NEXT: andi a0, a0, 1 -; RV32-XQCICM-NEXT: qc.mvnei a4, a0, 0, 
a2 -; RV32-XQCICM-NEXT: qc.mvnei a1, a0, 0, a3 -; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: qc.mveqi a2, a0, 0, a4 +; RV32-XQCICM-NEXT: qc.mveqi a1, a0, 0, a5 +; RV32-XQCICM-NEXT: mv a0, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_trunc: @@ -740,11 +740,11 @@ define i64 @select_i64_trunc(i64 %cond, i64 %x, i64 %y) nounwind { ; ; RV32IXQCI-LABEL: select_i64_trunc: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: mv a1, a5 +; RV32IXQCI-NEXT: mv a1, a3 ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a4, a0, 0, a2 -; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a3 -; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 0, a4 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a5 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_trunc: @@ -792,10 +792,10 @@ define i64 @select_i64_param(i1 %cond, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-LABEL: select_i64_param: ; RV32-XQCICM: # %bb.0: ; RV32-XQCICM-NEXT: andi a0, a0, 1 -; RV32-XQCICM-NEXT: qc.mvnei a3, a0, 0, a1 -; RV32-XQCICM-NEXT: qc.mvnei a4, a0, 0, a2 -; RV32-XQCICM-NEXT: mv a0, a3 -; RV32-XQCICM-NEXT: mv a1, a4 +; RV32-XQCICM-NEXT: qc.mveqi a1, a0, 0, a3 +; RV32-XQCICM-NEXT: qc.mveqi a2, a0, 0, a4 +; RV32-XQCICM-NEXT: mv a0, a1 +; RV32-XQCICM-NEXT: mv a1, a2 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_param: @@ -810,10 +810,10 @@ define i64 @select_i64_param(i1 %cond, i64 %x, i64 %y) nounwind { ; RV32IXQCI-LABEL: select_i64_param: ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a3, a0, 0, a1 -; RV32IXQCI-NEXT: qc.mvnei a4, a0, 0, a2 -; RV32IXQCI-NEXT: mv a0, a3 -; RV32IXQCI-NEXT: mv a1, a4 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a3 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 0, a4 +; RV32IXQCI-NEXT: mv a0, a1 +; RV32IXQCI-NEXT: mv a1, a2 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_param: @@ -866,10 +866,10 @@ define i64 @select_i64_eq(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: xor a1, a1, a3 ; RV32-XQCICM-NEXT: xor a0, a0, a2 ; RV32-XQCICM-NEXT: or a0, a0, a1 -; RV32-XQCICM-NEXT: qc.mveqi a6, a0, 0, a4 -; RV32-XQCICM-NEXT: qc.mveqi a7, a0, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mvnei a4, a0, 0, a6 +; RV32-XQCICM-NEXT: qc.mvnei a5, a0, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_eq: @@ -887,10 +887,10 @@ define i64 @select_i64_eq(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: xor a1, a1, a3 ; RV32IXQCI-NEXT: xor a0, a0, a2 ; RV32IXQCI-NEXT: or a0, a0, a1 -; RV32IXQCI-NEXT: qc.mveqi a6, a0, 0, a4 -; RV32IXQCI-NEXT: qc.mveqi a7, a0, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mvnei a4, a0, 0, a6 +; RV32IXQCI-NEXT: qc.mvnei a5, a0, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_eq: @@ -943,10 +943,10 @@ define i64 @select_i64_ne(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: xor a1, a1, a3 ; RV32-XQCICM-NEXT: xor a0, a0, a2 ; RV32-XQCICM-NEXT: or a0, a0, a1 -; RV32-XQCICM-NEXT: qc.mvnei a6, a0, 0, a4 -; RV32-XQCICM-NEXT: qc.mvnei a7, a0, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mveqi a4, a0, 0, a6 +; RV32-XQCICM-NEXT: qc.mveqi a5, a0, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_ne: @@ -964,10 +964,10 @@ define i64 @select_i64_ne(i64 %a, 
i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: xor a1, a1, a3 ; RV32IXQCI-NEXT: xor a0, a0, a2 ; RV32IXQCI-NEXT: or a0, a0, a1 -; RV32IXQCI-NEXT: qc.mvnei a6, a0, 0, a4 -; RV32IXQCI-NEXT: qc.mvnei a7, a0, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mveqi a4, a0, 0, a6 +; RV32IXQCI-NEXT: qc.mveqi a5, a0, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_ne: @@ -1025,10 +1025,10 @@ define i64 @select_i64_ugt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a2, a0 ; RV32-XQCICM-NEXT: sltu a2, a3, a1 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_ugt: @@ -1050,10 +1050,10 @@ define i64 @select_i64_ugt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a2, a0 ; RV32IXQCI-NEXT: sltu a2, a3, a1 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_ugt: @@ -1111,10 +1111,10 @@ define i64 @select_i64_uge(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a0, a2 ; RV32-XQCICM-NEXT: sltu a2, a1, a3 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_uge: @@ -1136,10 +1136,10 @@ define i64 @select_i64_uge(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a0, a2 ; RV32IXQCI-NEXT: sltu a2, a1, a3 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_uge: @@ -1197,10 +1197,10 @@ define i64 @select_i64_ult(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a0, a2 ; RV32-XQCICM-NEXT: sltu a2, a1, a3 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_ult: @@ -1222,10 +1222,10 @@ define i64 @select_i64_ult(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a0, a2 ; RV32IXQCI-NEXT: sltu a2, a1, a3 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mvnei a7, a2, 0, a5 
-; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_ult: @@ -1283,10 +1283,10 @@ define i64 @select_i64_ule(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a2, a0 ; RV32-XQCICM-NEXT: sltu a2, a3, a1 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_ule: @@ -1308,10 +1308,10 @@ define i64 @select_i64_ule(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a2, a0 ; RV32IXQCI-NEXT: sltu a2, a3, a1 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_ule: @@ -1369,10 +1369,10 @@ define i64 @select_i64_sgt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a2, a0 ; RV32-XQCICM-NEXT: slt a2, a3, a1 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_sgt: @@ -1394,10 +1394,10 @@ define i64 @select_i64_sgt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a2, a0 ; RV32IXQCI-NEXT: slt a2, a3, a1 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_sgt: @@ -1455,10 +1455,10 @@ define i64 @select_i64_sge(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a0, a2 ; RV32-XQCICM-NEXT: slt a2, a1, a3 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_sge: @@ -1480,10 +1480,10 @@ define i64 @select_i64_sge(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a0, a2 ; RV32IXQCI-NEXT: slt a2, a1, a3 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: 
ret ; ; RV64-LABEL: select_i64_sge: @@ -1541,10 +1541,10 @@ define i64 @select_i64_slt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a0, a2 ; RV32-XQCICM-NEXT: slt a2, a1, a3 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_slt: @@ -1566,10 +1566,10 @@ define i64 @select_i64_slt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a0, a2 ; RV32IXQCI-NEXT: slt a2, a1, a3 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mvnei a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mvnei a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mveqi a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mveqi a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_slt: @@ -1627,10 +1627,10 @@ define i64 @select_i64_sle(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32-XQCICM-NEXT: sltu a0, a2, a0 ; RV32-XQCICM-NEXT: slt a2, a3, a1 ; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0 -; RV32-XQCICM-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32-XQCICM-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32-XQCICM-NEXT: mv a0, a6 -; RV32-XQCICM-NEXT: mv a1, a7 +; RV32-XQCICM-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32-XQCICM-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32-XQCICM-NEXT: mv a0, a4 +; RV32-XQCICM-NEXT: mv a1, a5 ; RV32-XQCICM-NEXT: ret ; ; RV32-XQCICS-LABEL: select_i64_sle: @@ -1652,10 +1652,10 @@ define i64 @select_i64_sle(i64 %a, i64 %b, i64 %x, i64 %y) nounwind { ; RV32IXQCI-NEXT: sltu a0, a2, a0 ; RV32IXQCI-NEXT: slt a2, a3, a1 ; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0 -; RV32IXQCI-NEXT: qc.mveqi a6, a2, 0, a4 -; RV32IXQCI-NEXT: qc.mveqi a7, a2, 0, a5 -; RV32IXQCI-NEXT: mv a0, a6 -; RV32IXQCI-NEXT: mv a1, a7 +; RV32IXQCI-NEXT: qc.mvnei a4, a2, 0, a6 +; RV32IXQCI-NEXT: qc.mvnei a5, a2, 0, a7 +; RV32IXQCI-NEXT: mv a0, a4 +; RV32IXQCI-NEXT: mv a1, a5 ; RV32IXQCI-NEXT: ret ; ; RV64-LABEL: select_i64_sle: diff --git a/llvm/test/CodeGen/RISCV/select.ll b/llvm/test/CodeGen/RISCV/select.ll index 19fade6..8273c65 100644 --- a/llvm/test/CodeGen/RISCV/select.ll +++ b/llvm/test/CodeGen/RISCV/select.ll @@ -1153,8 +1153,8 @@ define i32 @select_sub_1(i1 zeroext %cond, i32 %a, i32 %b) { ; RV32IXQCI-LABEL: select_sub_1: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: sub a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = sub i32 %a, %b @@ -1301,9 +1301,9 @@ define i32 @select_sub_4(i1 zeroext %cond, i32 %x) { ; ; RV32IXQCI-LABEL: select_sub_4: ; RV32IXQCI: # %bb.0: -; RV32IXQCI-NEXT: addi a1, a1, -128 -; RV32IXQCI-NEXT: li a2, 128 -; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a2 +; RV32IXQCI-NEXT: addi a2, a1, -128 +; RV32IXQCI-NEXT: li a1, 128 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 ; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret %add = sub i32 %x, 128 @@ -1348,8 +1348,8 @@ define i32 @select_and_1(i1 zeroext %cond, i32 %a, i32 %b) { ; RV32IXQCI-LABEL: select_and_1: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: and a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; 
RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = and i32 %a, %b @@ -1493,8 +1493,8 @@ define i32 @select_udiv_1(i1 zeroext %cond, i32 %a, i32 %b) { ; RV32IXQCI-LABEL: select_udiv_1: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: divu a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = udiv i32 %a, %b @@ -1682,8 +1682,8 @@ define i32 @select_shl_1(i1 zeroext %cond, i32 %a, i32 %b) { ; RV32IXQCI-LABEL: select_shl_1: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: sll a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = shl i32 %a, %b @@ -1798,8 +1798,8 @@ define i32 @select_ashr_1(i1 zeroext %cond, i32 %a, i32 %b) { ; RV32IXQCI-LABEL: select_ashr_1: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: sra a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = ashr i32 %a, %b @@ -1914,8 +1914,8 @@ define i32 @select_lshr_1(i1 zeroext %cond, i32 %a, i32 %b) { ; RV32IXQCI-LABEL: select_lshr_1: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: srl a1, a1, a2 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %c = lshr i32 %a, %b @@ -2371,9 +2371,9 @@ define i32 @select_cst5(i1 zeroext %cond) { ; RV32IXQCI-LABEL: select_cst5: ; RV32IXQCI: # %bb.0: ; RV32IXQCI-NEXT: lui a1, 1 -; RV32IXQCI-NEXT: addi a1, a1, -2047 -; RV32IXQCI-NEXT: li a2, 2047 -; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a2 +; RV32IXQCI-NEXT: addi a2, a1, -2047 +; RV32IXQCI-NEXT: li a1, 2047 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 ; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret %ret = select i1 %cond, i32 2047, i32 2049 @@ -2870,8 +2870,8 @@ define void @select_redundant_czero_eqz1(ptr %0, ptr %1) { ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: lui a2, %hi(select_redundant_czero_eqz_data) ; RV32IXQCI-NEXT: addi a2, a2, %lo(select_redundant_czero_eqz_data) -; RV32IXQCI-NEXT: qc.mveqi a0, a0, 0, a2 -; RV32IXQCI-NEXT: sw a0, 0(a1) +; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a0 +; RV32IXQCI-NEXT: sw a2, 0(a1) ; RV32IXQCI-NEXT: ret entry: %3 = icmp eq ptr %0, null diff --git a/llvm/test/CodeGen/RISCV/xqcicm.ll b/llvm/test/CodeGen/RISCV/xqcicm.ll index 1741be7..fb48301 100644 --- a/llvm/test/CodeGen/RISCV/xqcicm.ll +++ b/llvm/test/CodeGen/RISCV/xqcicm.ll @@ -23,15 +23,15 @@ define i32 @select_example(i32 %cond, i32 %x, i32 %y) { ; RV32IXQCICM-LABEL: select_example: ; RV32IXQCICM: # %bb.0: # %entry ; RV32IXQCICM-NEXT: andi a0, a0, 1 -; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCICM-NEXT: mv a0, a2 +; RV32IXQCICM-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCICM-NEXT: mv a0, a1 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_example: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: andi a0, a0, 1 -; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1 -; RV32IXQCI-NEXT: mv a0, a2 +; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2 +; RV32IXQCI-NEXT: mv a0, a1 ; RV32IXQCI-NEXT: ret entry: %cond_trunc = trunc i32 %cond to i1 @@ -52,14 +52,14 @@ define i32 @select_cc_example_eq(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_eq: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mveqi a3, a0, 11, a2 -; 
RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_eq: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp eq i32 %a, 11 @@ -80,14 +80,14 @@ define i32 @select_cc_example_eq1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_eq1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_eq1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp eq i32 11, %a @@ -108,14 +108,14 @@ define i32 @select_cc_example_ne(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ne: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ne: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ne i32 %a, 11 @@ -136,14 +136,14 @@ define i32 @select_cc_example_ne1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ne1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ne1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ne i32 11, %a @@ -164,14 +164,14 @@ define i32 @select_cc_example_slt(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_slt: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlti a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_slt: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvlti a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp slt i32 %a, 11 @@ -192,14 +192,14 @@ define i32 @select_cc_example_slt1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_slt1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgei a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlti a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_slt1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgei a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlti a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp slt i32 11, %a @@ -220,14 +220,14 @@ define i32 
@select_cc_example_sle(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sle: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlti a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgei a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sle: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvlti a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgei a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sle i32 %a, 11 @@ -248,14 +248,14 @@ define i32 @select_cc_example_sle1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sle1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlti a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sle1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlti a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sle i32 11, %a @@ -276,14 +276,14 @@ define i32 @select_cc_example_sgt(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sgt: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgei a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlti a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sgt: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgei a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlti a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sgt i32 %a, 11 @@ -304,14 +304,14 @@ define i32 @select_cc_example_sgt1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sgt1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlti a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sgt1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvlti a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sgt i32 11, %a @@ -332,14 +332,14 @@ define i32 @select_cc_example_sge(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sge: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlti a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sge: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlti a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sge i32 %a, 11 @@ -360,14 +360,14 @@ define i32 @select_cc_example_sge1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sge1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlti a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgei a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sge1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvlti a3, a0, 12, a2 -; 
RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgei a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sge i32 11, %a @@ -388,14 +388,14 @@ define i32 @select_cc_example_ule(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ule: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvltui a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgeui a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ule: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvltui a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeui a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ule i32 %a, 11 @@ -416,14 +416,14 @@ define i32 @select_cc_example_ule1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ule1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgeui a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvltui a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ule1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgeui a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltui a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ule i32 11, %a @@ -444,14 +444,14 @@ define i32 @select_cc_example_ugt(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ugt: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgeui a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvltui a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ugt: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgeui a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltui a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ugt i32 %a, 11 @@ -472,14 +472,14 @@ define i32 @select_cc_example_ugt1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ugt1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvltui a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgeui a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ugt1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvltui a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeui a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ugt i32 11, %a @@ -500,14 +500,14 @@ define i32 @select_cc_example_ult(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ult: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvltui a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgeui a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ult: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvltui a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeui a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ult i32 %a, 11 @@ -528,14 +528,14 @@ define i32 @select_cc_example_ult1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ult1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgeui a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: 
qc.mvltui a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ult1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgeui a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltui a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ult i32 11, %a @@ -556,14 +556,14 @@ define i32 @select_cc_example_uge(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_uge: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgeui a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvltui a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_uge: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgeui a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltui a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp uge i32 %a, 11 @@ -584,14 +584,14 @@ define i32 @select_cc_example_uge1(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_uge1: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvltui a3, a0, 12, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgeui a2, a0, 12, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_uge1: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvltui a3, a0, 12, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeui a2, a0, 12, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp uge i32 11, %a @@ -611,14 +611,14 @@ define i32 @select_cc_example_eq_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_eq_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mveq a3, a0, a1, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvne a2, a0, a1, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_eq_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mveq a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvne a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp eq i32 %a, %b @@ -638,14 +638,14 @@ define i32 @select_cc_example_ne_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ne_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvne a3, a0, a1, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mveq a2, a0, a1, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ne_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvne a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mveq a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ne i32 %a, %b @@ -665,14 +665,14 @@ define i32 @select_cc_example_slt_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_slt_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlt a3, a0, a1, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvge a2, a0, a1, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_slt_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvlt a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvge a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp slt i32 %a, %b @@ -692,14 +692,14 @@ define i32 @select_cc_example_sge_reg(i32 %a, 
i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sge_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvge a3, a0, a1, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlt a2, a0, a1, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sge_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvge a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlt a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sge i32 %a, %b @@ -719,14 +719,14 @@ define i32 @select_cc_example_sgt_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sgt_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvlt a3, a1, a0, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvge a2, a1, a0, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sgt_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvlt a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvge a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sgt i32 %a, %b @@ -746,14 +746,14 @@ define i32 @select_cc_example_sle_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_sle_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvge a3, a1, a0, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvlt a2, a1, a0, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_sle_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvge a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvlt a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp sle i32 %a, %b @@ -773,14 +773,14 @@ define i32 @select_cc_example_ugt_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ugt_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvltu a3, a1, a0, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgeu a2, a1, a0, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ugt_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvltu a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeu a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ugt i32 %a, %b @@ -800,14 +800,14 @@ define i32 @select_cc_example_ult_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ult_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvltu a3, a0, a1, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgeu a2, a0, a1, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ult_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvltu a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeu a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ult i32 %a, %b @@ -827,14 +827,14 @@ define i32 @select_cc_example_uge_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_uge_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgeu a3, a0, a1, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvltu a2, a0, a1, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_uge_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgeu a3, a0, a1, a2 
-; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltu a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp uge i32 %a, %b @@ -854,14 +854,14 @@ define i32 @select_cc_example_ule_reg(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ule_reg: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvgeu a3, a1, a0, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvltu a2, a1, a0, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ule_reg: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvgeu a3, a1, a0, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvltu a2, a1, a0, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ule i32 %a, %b @@ -883,18 +883,263 @@ define i32 @select_cc_example_ule_neg(i32 %a, i32 %b, i32 %x, i32 %y) { ; RV32IXQCICM-LABEL: select_cc_example_ule_neg: ; RV32IXQCICM: # %bb.0: # %entry ; RV32IXQCICM-NEXT: li a1, -10 -; RV32IXQCICM-NEXT: qc.mvltu a3, a0, a1, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvgeu a2, a0, a1, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ule_neg: ; RV32IXQCI: # %bb.0: # %entry ; RV32IXQCI-NEXT: li a1, -10 -; RV32IXQCI-NEXT: qc.mvltu a3, a0, a1, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvgeu a2, a0, a1, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ule i32 %a, -11 %sel = select i1 %cmp, i32 %x, i32 %y ret i32 %sel } + +define i32 @select_cc_example_eq_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_eq_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: beq a2, a1, .LBB32_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB32_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_eq_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvne a0, a2, a1, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_eq_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvne a0, a2, a1, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp eq i32 %x, %b + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_lt_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_lt_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: blt a2, a1, .LBB33_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB33_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_lt_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvge a0, a2, a1, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_lt_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvge a0, a2, a1, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp slt i32 %x, %b + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_ge_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_ge_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: bge a2, a1, .LBB34_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB34_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_ge_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvlt a0, a2, a1, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_ge_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvlt a0, a2, a1, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp sge i32 %x, %b + %sel = select i1 %cmp, i32 %a, 
i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_ult_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_ult_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: bltu a2, a1, .LBB35_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB35_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_ult_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvgeu a0, a2, a1, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_ult_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvgeu a0, a2, a1, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp ult i32 %x, %b + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_uge_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_uge_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: bgeu a2, a1, .LBB36_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB36_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_uge_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvltu a0, a2, a1, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_uge_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvltu a0, a2, a1, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp uge i32 %x, %b + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_eq_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_eq_imm_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a1, 11 +; RV32I-NEXT: beq a2, a1, .LBB37_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB37_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_eq_imm_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvnei a0, a2, 11, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_eq_imm_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvnei a0, a2, 11, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp eq i32 %x, 11 + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_lt_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_lt_imm_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a1, 11 +; RV32I-NEXT: blt a2, a1, .LBB38_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB38_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_lt_imm_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvgei a0, a2, 11, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_lt_imm_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvgei a0, a2, 11, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp slt i32 %x, 11 + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_ge_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_ge_imm_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a1, 10 +; RV32I-NEXT: blt a1, a2, .LBB39_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB39_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_ge_imm_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvlti a0, a2, 11, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_ge_imm_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvlti a0, a2, 11, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp sge i32 %x, 11 + %sel = select i1 %cmp, i32 %a, 
i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_ult_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_ult_imm_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a1, 11 +; RV32I-NEXT: bltu a2, a1, .LBB40_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB40_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_ult_imm_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvgeui a0, a2, 11, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_ult_imm_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvgeui a0, a2, 11, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp ult i32 %x, 11 + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} + +define i32 @select_cc_example_uge_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) { +; RV32I-LABEL: select_cc_example_uge_imm_mv: +; RV32I: # %bb.0: # %entry +; RV32I-NEXT: li a1, 10 +; RV32I-NEXT: bltu a1, a2, .LBB41_2 +; RV32I-NEXT: # %bb.1: # %entry +; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: .LBB41_2: # %entry +; RV32I-NEXT: ret +; +; RV32IXQCICM-LABEL: select_cc_example_uge_imm_mv: +; RV32IXQCICM: # %bb.0: # %entry +; RV32IXQCICM-NEXT: qc.mvltui a0, a2, 11, a3 +; RV32IXQCICM-NEXT: ret +; +; RV32IXQCI-LABEL: select_cc_example_uge_imm_mv: +; RV32IXQCI: # %bb.0: # %entry +; RV32IXQCI-NEXT: qc.mvltui a0, a2, 11, a3 +; RV32IXQCI-NEXT: ret +entry: + %cmp = icmp uge i32 %x, 11 + %sel = select i1 %cmp, i32 %a, i32 %y + ret i32 %sel +} diff --git a/llvm/test/CodeGen/RISCV/xqcics.ll b/llvm/test/CodeGen/RISCV/xqcics.ll index 38de8fb..5b7ca9e7 100644 --- a/llvm/test/CodeGen/RISCV/xqcics.ll +++ b/llvm/test/CodeGen/RISCV/xqcics.ll @@ -134,14 +134,14 @@ define i32 @select_cc_example_eq(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_eq: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_eq: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp eq i32 %a, 11 @@ -167,14 +167,14 @@ define i32 @select_cc_example_eq_c(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_eq_c: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_eq_c: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mveqi a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mvnei a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp eq i32 11, %a @@ -200,14 +200,14 @@ define i32 @select_cc_example_ne(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ne: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ne: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ne i32 %a, 11 @@ -233,14 
+233,14 @@ define i32 @select_cc_example_ne_c(i32 %a, i32 %b, i32 %x, i32 %y) { ; ; RV32IXQCICM-LABEL: select_cc_example_ne_c: ; RV32IXQCICM: # %bb.0: # %entry -; RV32IXQCICM-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCICM-NEXT: mv a0, a3 +; RV32IXQCICM-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCICM-NEXT: mv a0, a2 ; RV32IXQCICM-NEXT: ret ; ; RV32IXQCI-LABEL: select_cc_example_ne_c: ; RV32IXQCI: # %bb.0: # %entry -; RV32IXQCI-NEXT: qc.mvnei a3, a0, 11, a2 -; RV32IXQCI-NEXT: mv a0, a3 +; RV32IXQCI-NEXT: qc.mveqi a2, a0, 11, a3 +; RV32IXQCI-NEXT: mv a0, a2 ; RV32IXQCI-NEXT: ret entry: %cmp = icmp ne i32 11, %a diff --git a/llvm/test/CodeGen/VE/Scalar/max.ll b/llvm/test/CodeGen/VE/Scalar/max.ll index 51da557..7950842 100644 --- a/llvm/test/CodeGen/VE/Scalar/max.ll +++ b/llvm/test/CodeGen/VE/Scalar/max.ll @@ -1,7 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s -; RUN: llc < %s -mtriple=ve-unknown-unknown -enable-no-signed-zeros-fp-math \ -; RUN: -enable-no-nans-fp-math | FileCheck %s -check-prefix=OPT define double @maxf64(double, double) { ; CHECK-LABEL: maxf64: @@ -10,16 +8,21 @@ define double @maxf64(double, double) { ; CHECK-NEXT: cmov.d.gt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxf64: -; OPT: # %bb.0: -; OPT-NEXT: fmax.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ogt double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @maxf64_fast(double, double) { +; CHECK-LABEL: maxf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ogt double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define double @max2f64(double, double) { ; CHECK-LABEL: max2f64: ; CHECK: # %bb.0: @@ -27,16 +30,21 @@ define double @max2f64(double, double) { ; CHECK-NEXT: cmov.d.ge %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2f64: -; OPT: # %bb.0: -; OPT-NEXT: fmax.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp oge double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @max2f64_fast(double, double) { +; CHECK-LABEL: max2f64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp oge double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + ; VE has no max for unordered comparison define double @maxuf64(double, double) { ; CHECK-LABEL: maxuf64: @@ -45,16 +53,21 @@ define double @maxuf64(double, double) { ; CHECK-NEXT: cmov.d.gtnan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxuf64: -; OPT: # %bb.0: -; OPT-NEXT: fmax.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ugt double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @maxuf64_fast(double, double) { +; CHECK-LABEL: maxuf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ugt double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + ; VE has no max for unordered comparison define double @max2uf64(double, double) { ; CHECK-LABEL: max2uf64: @@ -63,16 +76,21 @@ define double @max2uf64(double, double) { ; CHECK-NEXT: cmov.d.genan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2uf64: -; OPT: # %bb.0: -; OPT-NEXT: fmax.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, 
%s10) %3 = fcmp uge double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @max2uf64_fast(double, double) { +; CHECK-LABEL: max2uf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp uge double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define float @maxf32(float, float) { ; CHECK-LABEL: maxf32: ; CHECK: # %bb.0: @@ -80,16 +98,21 @@ define float @maxf32(float, float) { ; CHECK-NEXT: cmov.s.gt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxf32: -; OPT: # %bb.0: -; OPT-NEXT: fmax.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ogt float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @maxf32_fast(float, float) { +; CHECK-LABEL: maxf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ogt float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @max2f32(float, float) { ; CHECK-LABEL: max2f32: ; CHECK: # %bb.0: @@ -97,16 +120,21 @@ define float @max2f32(float, float) { ; CHECK-NEXT: cmov.s.ge %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2f32: -; OPT: # %bb.0: -; OPT-NEXT: fmax.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp oge float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @max2f32_fast(float, float) { +; CHECK-LABEL: max2f32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp oge float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @maxuf32(float, float) { ; CHECK-LABEL: maxuf32: ; CHECK: # %bb.0: @@ -114,16 +142,21 @@ define float @maxuf32(float, float) { ; CHECK-NEXT: cmov.s.gtnan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxuf32: -; OPT: # %bb.0: -; OPT-NEXT: fmax.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ugt float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @maxuf32_fast(float, float) { +; CHECK-LABEL: maxuf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ugt float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @max2uf32(float, float) { ; CHECK-LABEL: max2uf32: ; CHECK: # %bb.0: @@ -131,26 +164,26 @@ define float @max2uf32(float, float) { ; CHECK-NEXT: cmov.s.genan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2uf32: -; OPT: # %bb.0: -; OPT-NEXT: fmax.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp uge float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @max2uf32_fast(float, float) { +; CHECK-LABEL: max2uf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmax.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp uge float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define i64 @maxi64(i64, i64) { ; CHECK-LABEL: maxi64: ; CHECK: # %bb.0: ; CHECK-NEXT: maxs.l %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxi64: -; OPT: # %bb.0: -; OPT-NEXT: maxs.l %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sgt i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -161,11 +194,6 @@ define i64 @max2i64(i64, i64) { ; CHECK: # %bb.0: ; CHECK-NEXT: maxs.l %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2i64: -; OPT: # 
%bb.0: -; OPT-NEXT: maxs.l %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sge i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -178,13 +206,6 @@ define i64 @maxu64(i64, i64) { ; CHECK-NEXT: cmov.l.gt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxu64: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.l %s2, %s0, %s1 -; OPT-NEXT: cmov.l.gt %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ugt i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -197,13 +218,6 @@ define i64 @max2u64(i64, i64) { ; CHECK-NEXT: cmov.l.ge %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2u64: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.l %s2, %s0, %s1 -; OPT-NEXT: cmov.l.ge %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp uge i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -214,11 +228,6 @@ define i32 @maxi32(i32, i32) { ; CHECK: # %bb.0: ; CHECK-NEXT: maxs.w.sx %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxi32: -; OPT: # %bb.0: -; OPT-NEXT: maxs.w.sx %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sgt i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -229,11 +238,6 @@ define i32 @max2i32(i32, i32) { ; CHECK: # %bb.0: ; CHECK-NEXT: maxs.w.sx %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2i32: -; OPT: # %bb.0: -; OPT-NEXT: maxs.w.sx %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sge i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -246,13 +250,6 @@ define i32 @maxu32(i32, i32) { ; CHECK-NEXT: cmov.w.gt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxu32: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.w %s2, %s0, %s1 -; OPT-NEXT: cmov.w.gt %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ugt i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -265,13 +262,6 @@ define i32 @max2u32(i32, i32) { ; CHECK-NEXT: cmov.w.ge %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: max2u32: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.w %s2, %s0, %s1 -; OPT-NEXT: cmov.w.ge %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp uge i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -283,12 +273,6 @@ define zeroext i1 @maxi1(i1 zeroext, i1 zeroext) { ; CHECK-NEXT: or %s0, %s0, %s1 ; CHECK-NEXT: and %s0, 1, %s0 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: maxi1: -; OPT: # %bb.0: -; OPT-NEXT: or %s0, %s0, %s1 -; OPT-NEXT: and %s0, 1, %s0 -; OPT-NEXT: b.l.t (, %s10) %3 = xor i1 %1, true %4 = and i1 %3, %0 %5 = select i1 %4, i1 %0, i1 %1 diff --git a/llvm/test/CodeGen/VE/Scalar/min.ll b/llvm/test/CodeGen/VE/Scalar/min.ll index e8f4939..36a2e06 100644 --- a/llvm/test/CodeGen/VE/Scalar/min.ll +++ b/llvm/test/CodeGen/VE/Scalar/min.ll @@ -1,7 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s -; RUN: llc < %s -mtriple=ve-unknown-unknown -enable-no-signed-zeros-fp-math \ -; RUN: -enable-no-nans-fp-math | FileCheck %s -check-prefix=OPT define double @minf64(double, double) { ; CHECK-LABEL: minf64: @@ -10,16 +8,21 @@ define double @minf64(double, double) { ; CHECK-NEXT: cmov.d.lt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minf64: -; OPT: # %bb.0: -; OPT-NEXT: fmin.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp olt double %0, %1 %4 = select i1 %3, double 
%0, double %1 ret double %4 } +define double @minf64_fast(double, double) { +; CHECK-LABEL: minf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp olt double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define double @min2f64(double, double) { ; CHECK-LABEL: min2f64: ; CHECK: # %bb.0: @@ -27,16 +30,21 @@ define double @min2f64(double, double) { ; CHECK-NEXT: cmov.d.le %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2f64: -; OPT: # %bb.0: -; OPT-NEXT: fmin.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ole double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @min2f64_fast(double, double) { +; CHECK-LABEL: min2f64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ole double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define double @minuf64(double, double) { ; CHECK-LABEL: minuf64: ; CHECK: # %bb.0: @@ -44,16 +52,21 @@ define double @minuf64(double, double) { ; CHECK-NEXT: cmov.d.ltnan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minuf64: -; OPT: # %bb.0: -; OPT-NEXT: fmin.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ult double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @minuf64_fast(double, double) { +; CHECK-LABEL: minuf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ult double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define double @min2uf64(double, double) { ; CHECK-LABEL: min2uf64: ; CHECK: # %bb.0: @@ -61,16 +74,21 @@ define double @min2uf64(double, double) { ; CHECK-NEXT: cmov.d.lenan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2uf64: -; OPT: # %bb.0: -; OPT-NEXT: fmin.d %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ule double %0, %1 %4 = select i1 %3, double %0, double %1 ret double %4 } +define double @min2uf64_fast(double, double) { +; CHECK-LABEL: min2uf64_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.d %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ule double %0, %1 + %4 = select nnan nsz i1 %3, double %0, double %1 + ret double %4 +} + define float @minf32(float, float) { ; CHECK-LABEL: minf32: ; CHECK: # %bb.0: @@ -78,16 +96,21 @@ define float @minf32(float, float) { ; CHECK-NEXT: cmov.s.lt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minf32: -; OPT: # %bb.0: -; OPT-NEXT: fmin.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp olt float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @minf32_fast(float, float) { +; CHECK-LABEL: minf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp olt float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @min2f32(float, float) { ; CHECK-LABEL: min2f32: ; CHECK: # %bb.0: @@ -95,16 +118,21 @@ define float @min2f32(float, float) { ; CHECK-NEXT: cmov.s.le %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2f32: -; OPT: # %bb.0: -; OPT-NEXT: fmin.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ole float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @min2f32_fast(float, float) { +; CHECK-LABEL: min2f32_fast: +; 
CHECK: # %bb.0: +; CHECK-NEXT: fmin.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ole float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @minuf32(float, float) { ; CHECK-LABEL: minuf32: ; CHECK: # %bb.0: @@ -112,16 +140,21 @@ define float @minuf32(float, float) { ; CHECK-NEXT: cmov.s.ltnan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minuf32: -; OPT: # %bb.0: -; OPT-NEXT: fmin.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ult float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @minuf32_fast(float, float) { +; CHECK-LABEL: minuf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ult float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define float @min2uf32(float, float) { ; CHECK-LABEL: min2uf32: ; CHECK: # %bb.0: @@ -129,26 +162,26 @@ define float @min2uf32(float, float) { ; CHECK-NEXT: cmov.s.lenan %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2uf32: -; OPT: # %bb.0: -; OPT-NEXT: fmin.s %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = fcmp ule float %0, %1 %4 = select i1 %3, float %0, float %1 ret float %4 } +define float @min2uf32_fast(float, float) { +; CHECK-LABEL: min2uf32_fast: +; CHECK: # %bb.0: +; CHECK-NEXT: fmin.s %s0, %s0, %s1 +; CHECK-NEXT: b.l.t (, %s10) + %3 = fcmp ule float %0, %1 + %4 = select nnan nsz i1 %3, float %0, float %1 + ret float %4 +} + define i64 @mini64(i64, i64) { ; CHECK-LABEL: mini64: ; CHECK: # %bb.0: ; CHECK-NEXT: mins.l %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: mini64: -; OPT: # %bb.0: -; OPT-NEXT: mins.l %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp slt i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -159,11 +192,6 @@ define i64 @min2i64(i64, i64) { ; CHECK: # %bb.0: ; CHECK-NEXT: mins.l %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2i64: -; OPT: # %bb.0: -; OPT-NEXT: mins.l %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sle i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -176,13 +204,6 @@ define i64 @minu64(i64, i64) { ; CHECK-NEXT: cmov.l.lt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minu64: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.l %s2, %s0, %s1 -; OPT-NEXT: cmov.l.lt %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ult i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -195,13 +216,6 @@ define i64 @min2u64(i64, i64) { ; CHECK-NEXT: cmov.l.le %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2u64: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.l %s2, %s0, %s1 -; OPT-NEXT: cmov.l.le %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ule i64 %0, %1 %4 = select i1 %3, i64 %0, i64 %1 ret i64 %4 @@ -212,11 +226,6 @@ define i32 @mini32(i32, i32) { ; CHECK: # %bb.0: ; CHECK-NEXT: mins.w.sx %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: mini32: -; OPT: # %bb.0: -; OPT-NEXT: mins.w.sx %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp slt i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -227,11 +236,6 @@ define i32 @min2i32(i32, i32) { ; CHECK: # %bb.0: ; CHECK-NEXT: mins.w.sx %s0, %s0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2i32: -; OPT: # %bb.0: -; OPT-NEXT: mins.w.sx %s0, %s0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp sle i32 %0, %1 %4 = select i1 %3, i32 
%0, i32 %1 ret i32 %4 @@ -244,13 +248,6 @@ define i32 @minu32(i32, i32) { ; CHECK-NEXT: cmov.w.lt %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: minu32: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.w %s2, %s0, %s1 -; OPT-NEXT: cmov.w.lt %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ult i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -263,13 +260,6 @@ define i32 @min2u32(i32, i32) { ; CHECK-NEXT: cmov.w.le %s1, %s0, %s2 ; CHECK-NEXT: or %s0, 0, %s1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: min2u32: -; OPT: # %bb.0: -; OPT-NEXT: cmpu.w %s2, %s0, %s1 -; OPT-NEXT: cmov.w.le %s1, %s0, %s2 -; OPT-NEXT: or %s0, 0, %s1 -; OPT-NEXT: b.l.t (, %s10) %3 = icmp ule i32 %0, %1 %4 = select i1 %3, i32 %0, i32 %1 ret i32 %4 @@ -283,14 +273,6 @@ define zeroext i1 @mini1(i1 zeroext, i1 zeroext) { ; CHECK-NEXT: cmov.w.ne %s0, %s1, %s2 ; CHECK-NEXT: adds.w.zx %s0, %s0, (0)1 ; CHECK-NEXT: b.l.t (, %s10) -; -; OPT-LABEL: mini1: -; OPT: # %bb.0: -; OPT-NEXT: and %s2, 1, %s0 -; OPT-NEXT: and %s0, %s1, %s0 -; OPT-NEXT: cmov.w.ne %s0, %s1, %s2 -; OPT-NEXT: adds.w.zx %s0, %s0, (0)1 -; OPT-NEXT: b.l.t (, %s10) %3 = xor i1 %0, true %4 = and i1 %3, %1 %5 = select i1 %4, i1 %0, i1 %1 diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll index bec3349..3590c4d 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll @@ -62,15 +62,12 @@ define <4 x i32> @combine_blend_of_permutes_v4i32(<2 x i64> %a0, <2 x i64> %a1) define <4 x float> @freeze_insertps(<4 x float> %a0, <4 x float> %a1) { ; SSE-LABEL: freeze_insertps: ; SSE: # %bb.0: -; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3] -; SSE-NEXT: insertps {{.*#+}} xmm1 = xmm0[1],xmm1[1,2,3] ; SSE-NEXT: movaps %xmm1, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: freeze_insertps: ; AVX: # %bb.0: -; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3] -; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[1],xmm1[1,2,3] +; AVX-NEXT: vmovaps %xmm1, %xmm0 ; AVX-NEXT: retq %s0 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 16) %f0 = freeze <4 x float> %s0 diff --git a/llvm/test/MC/LoongArch/Macros/macros-la.s b/llvm/test/MC/LoongArch/Macros/macros-la.s index a732988..8022d5b 100644 --- a/llvm/test/MC/LoongArch/Macros/macros-la.s +++ b/llvm/test/MC/LoongArch/Macros/macros-la.s @@ -26,6 +26,7 @@ la.abs $a0, sym_abs # ABS-NEXT: lu32i.d $a0, %abs64_lo20(sym_abs) # ABS-NEXT: lu52i.d $a0, $a0, %abs64_hi12(sym_abs) # ABS-EMPTY: +# RELOC-NEXT: R_LARCH_MARK_LA - 0x0 # RELOC-NEXT: R_LARCH_ABS_HI20 sym_abs 0x0 # RELOC-NEXT: R_LARCH_ABS_LO12 sym_abs 0x0 # RELOC-NEXT: R_LARCH_ABS64_LO20 sym_abs 0x0 diff --git a/llvm/test/Transforms/AggressiveInstCombine/memchr.ll b/llvm/test/Transforms/AggressiveInstCombine/memchr.ll index b26320b..6fbe960 100644 --- a/llvm/test/Transforms/AggressiveInstCombine/memchr.ll +++ b/llvm/test/Transforms/AggressiveInstCombine/memchr.ll @@ -6,9 +6,10 @@ declare ptr @memchr(ptr, i32, i64) -define i1 @test_memchr_null(i32 %x) { +define i1 @test_memchr_null(i32 %x) !prof !0 { ; CHECK-LABEL: define i1 @test_memchr_null( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]]) +; CHECK: !prof [[PROF_0:![0-9]+]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[X]] to i8 ; CHECK-NEXT: switch i8 [[TMP0]], label %[[ENTRY_SPLIT:.*]] [ @@ -40,9 +41,10 @@ entry: ret i1 %isnull } -define ptr 
@test_memchr(i32 %x) { +define ptr @test_memchr(i32 %x) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[X]] to i8 ; CHECK-NEXT: switch i8 [[TMP0]], label %[[ENTRY_SPLIT:.*]] [ @@ -72,16 +74,17 @@ entry: ret ptr %memchr } -define ptr @test_memchr_smaller_n(i32 %x) { +define ptr @test_memchr_smaller_n(i32 %x) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_smaller_n( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[X]] to i8 ; CHECK-NEXT: switch i8 [[TMP0]], label %[[ENTRY_SPLIT:.*]] [ ; CHECK-NEXT: i8 48, label %[[MEMCHR_CASE:.*]] ; CHECK-NEXT: i8 49, label %[[MEMCHR_CASE1:.*]] ; CHECK-NEXT: i8 0, label %[[MEMCHR_CASE2:.*]] -; CHECK-NEXT: ] +; CHECK-NEXT: ], !prof [[PROF_1:![0-9]+]] ; CHECK: [[MEMCHR_CASE]]: ; CHECK-NEXT: br label %[[MEMCHR_SUCCESS:.*]] ; CHECK: [[MEMCHR_CASE1]]: @@ -103,9 +106,10 @@ entry: ; negative tests -define ptr @test_memchr_larger_n(i32 %x) { +define ptr @test_memchr_larger_n(i32 %x) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_larger_n( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str, i32 [[X]], i64 6) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -115,9 +119,10 @@ entry: ret ptr %memchr } -define ptr @test_memchr_non_constant(i32 %x, ptr %str) { +define ptr @test_memchr_non_constant(i32 %x, ptr %str) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_non_constant( -; CHECK-SAME: i32 [[X:%.*]], ptr [[STR:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]], ptr [[STR:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr [[STR]], i32 [[X]], i64 5) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -127,8 +132,9 @@ entry: ret ptr %memchr } -define ptr @test_memchr_constant_ch() { -; CHECK-LABEL: define ptr @test_memchr_constant_ch() { +define ptr @test_memchr_constant_ch() !prof !0 { +; CHECK-LABEL: define ptr @test_memchr_constant_ch() +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str, i32 49, i64 5) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -138,9 +144,10 @@ entry: ret ptr %memchr } -define ptr @test_memchr_dynamic_n(i32 %x, i32 %y) { +define ptr @test_memchr_dynamic_n(i32 %x, i32 %y) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_dynamic_n( -; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str, i32 [[X]], i32 [[Y]]) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -150,9 +157,10 @@ entry: ret ptr %memchr } -define ptr @test_memchr_long(i32 %x) { +define ptr @test_memchr_long(i32 %x) !prof !0 { ; CHECK-LABEL: define ptr @test_memchr_long( -; CHECK-SAME: i32 [[X:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str_long, i32 [[X]], i64 8) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -163,9 +171,10 @@ entry: } ; We want to check that the compiler still calls memchr if the length is non-constant: -define ptr @test_memchr_non_constant_length2(i32 %x, i64 %len) { +define ptr @test_memchr_non_constant_length2(i32 %x, i64 %len) !prof !0 { ; CHECK-LABEL: define ptr 
@test_memchr_non_constant_length2( -; CHECK-SAME: i32 [[X:%.*]], i64 [[LEN:%.*]]) { +; CHECK-SAME: i32 [[X:%.*]], i64 [[LEN:%.*]]) +; CHECK: !prof [[PROF_0]] { ; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str, i32 [[X]], i64 [[LEN]]) ; CHECK-NEXT: ret ptr [[MEMCHR]] @@ -174,3 +183,7 @@ entry: %memchr = call ptr @memchr(ptr @str, i32 %x, i64 %len) ret ptr %memchr } + +!0 = !{!"function_entry_count", i64 1000} +; CHECK: [[PROF_0]] = !{!"function_entry_count", i64 1000} +; CHECK: [[PROF_1]] = !{!"unknown", !"aggressive-instcombine"}
\ No newline at end of file diff --git a/llvm/test/Transforms/InstCombine/preserve-profile.ll b/llvm/test/Transforms/InstCombine/preserve-profile.ll index dd83805..8cb3e68 100644 --- a/llvm/test/Transforms/InstCombine/preserve-profile.ll +++ b/llvm/test/Transforms/InstCombine/preserve-profile.ll @@ -46,9 +46,59 @@ define i32 @NegBin(i1 %C) !prof !0 { ret i32 %V } +define i32 @select_C_minus_1_or_C_from_bool(i1 %x) !prof !0 { +; CHECK-LABEL: define i32 @select_C_minus_1_or_C_from_bool( +; CHECK-SAME: i1 [[X:%.*]]) !prof [[PROF0]] { +; CHECK-NEXT: [[ADD:%.*]] = select i1 [[X]], i32 41, i32 42, !prof [[PROF2:![0-9]+]] +; CHECK-NEXT: ret i32 [[ADD]] +; + %ext = sext i1 %x to i32 + %add = add i32 %ext, 42 + ret i32 %add +} + +define i5 @and_add(i1 %x, i1 %y) !prof !0 { +; CHECK-LABEL: define i5 @and_add( +; CHECK-SAME: i1 [[X:%.*]], i1 [[Y:%.*]]) !prof [[PROF0]] { +; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[X]], true +; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[Y]], [[TMP1]] +; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP2]], i5 -2, i5 0, !prof [[PROF2]] +; CHECK-NEXT: ret i5 [[R]] +; + %xz = zext i1 %x to i5 + %ys = sext i1 %y to i5 + %sub = add i5 %xz, %ys + %r = and i5 %sub, 30 + ret i5 %r +} + +define i32 @add_zext_zext_i1(i1 %a) !prof !0 { +; CHECK-LABEL: define i32 @add_zext_zext_i1( +; CHECK-SAME: i1 [[A:%.*]]) !prof [[PROF0]] { +; CHECK-NEXT: [[ADD:%.*]] = select i1 [[A]], i32 2, i32 0, !prof [[PROF2]] +; CHECK-NEXT: ret i32 [[ADD]] +; + %zext = zext i1 %a to i32 + %add = add i32 %zext, %zext + ret i32 %add +} + +define i32 @no_count_no_branch_weights(i1 %a) { +; CHECK-LABEL: define i32 @no_count_no_branch_weights( +; CHECK-SAME: i1 [[A:%.*]]) { +; CHECK-NEXT: [[ADD:%.*]] = select i1 [[A]], i32 2, i32 0 +; CHECK-NEXT: ret i32 [[ADD]] +; + %zext = zext i1 %a to i32 + %add = add i32 %zext, %zext + ret i32 %add +} + + !0 = !{!"function_entry_count", i64 1000} !1 = !{!"branch_weights", i32 2, i32 3} ;. ; CHECK: [[PROF0]] = !{!"function_entry_count", i64 1000} ; CHECK: [[PROF1]] = !{!"branch_weights", i32 2, i32 3} +; CHECK: [[PROF2]] = !{!"unknown", !"instcombine"} ;. 
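A minimal standalone reproducer for the fold that the preserve-profile.ll hunks above pin down (assembled from the test's own functions and metadata, so nothing here is a new API): run through opt -passes=instcombine -S, the two uses of the zext collapse into a single select, and because the enclosing function carries function_entry_count metadata, the pass tags the synthesized select with "unknown" branch weights instead of silently dropping profile data:

define i32 @add_zext_zext_i1(i1 %a) !prof !0 {
  %zext = zext i1 %a to i32
  %add = add i32 %zext, %zext   ; instcombine folds this to: select i1 %a, i32 2, i32 0, !prof !1
  ret i32 %add
}

!0 = !{!"function_entry_count", i64 1000}
; attached by the pass: !1 = !{!"unknown", !"instcombine"}

The "unknown" marker records that concrete branch weights could not be derived for the new instruction while leaving the function's entry count usable; the no_count_no_branch_weights test above checks the converse, i.e. that no !prof is attached when the function has no profile to preserve.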
diff --git a/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size.ll b/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size.ll index 8e9cb23..75420d4 100644 --- a/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size.ll +++ b/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-constant-size.ll @@ -1,4 +1,4 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "scalar.ph:" --version 6 ; RUN: opt -p loop-vectorize -force-vector-width=2 -S %s | FileCheck %s declare void @llvm.assume(i1) @@ -47,29 +47,8 @@ define void @deref_assumption_in_header_constant_trip_count(ptr noalias noundef ; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[GEP_A]], i64 4), "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: br label %loop.header @@ -123,27 +102,8 @@ define void @align_deref_assumption_in_header_constant_trip_count_loop_invariant ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] 
-; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4) ] @@ -216,29 +176,8 @@ define void @deref_assumption_too_small_in_header_constant_trip_count(ptr noalia ; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[GEP_A]], i64 4), "dereferenceable"(ptr [[GEP_A]], i64 2) ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: br label %loop.header @@ -312,29 +251,8 @@ define void @deref_assumption_in_header_constant_trip_count_align_1(ptr noalias ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 1 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 
[[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: br label %loop.header @@ -408,29 +326,8 @@ define void @deref_assumption_in_header_constant_trip_count_align_via_arg_attrib ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: br label %loop.header @@ -504,29 +401,8 @@ define void @deref_assumption_in_header_constant_trip_count_align_not_known(ptr ; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: br label %loop.header @@ -596,29 +472,8 @@ define void @deref_assumption_in_then_constant_trip_count(ptr noalias noundef %a ; CHECK-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP28]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[GEP_A]], i64 4), "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: br label %loop.header @@ -692,29 +547,8 @@ define void @deref_assumption_in_latch_constant_trip_count(ptr noalias noundef % ; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[GEP_A]], i64 4), "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; 
CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: br label %loop.header @@ -747,7 +581,7 @@ exit: define void @deref_assumption_in_header_variable_trip_count(ptr noalias noundef %a, ptr noalias %b, ptr noalias %c, i64 %N) nofree nosync{ ; CHECK-LABEL: define void @deref_assumption_in_header_variable_trip_count( ; CHECK-SAME: ptr noalias noundef [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 [[N:%.*]]) #[[ATTR1]] { -; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: [[ENTRY:.*:]] ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 2 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: @@ -792,30 +626,8 @@ define void @deref_assumption_in_header_variable_trip_count(ptr noalias noundef ; CHECK-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; CHECK-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]] ; CHECK: [[SCALAR_PH]]: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[GEP_A]], i64 4), "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]], !llvm.loop [[LOOP11:![0-9]+]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void ; entry: br label %loop.header @@ -867,28 +679,8 @@ define void @deref_assumption_in_preheader_constant_trip_count_align_1(ptr noali ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], 
align 1 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %a, i64 4000) ] @@ -958,28 +750,8 @@ define void @deref_assumption_too_small_in_preheader_constant_trip_count_align_1 ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 1 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %a, i64 3999) ] @@ -1031,28 +803,8 @@ define void @align_and_deref_assumption_in_preheader_constant_trip_count_align_4 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; 
CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4000) ] @@ -1105,28 +857,8 @@ define void @deref_assumption_in_preheader_constant_trip_count_align_4_known_via ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %a, i64 4000) ] @@ -1196,28 +928,8 @@ define void @deref_assumption_in_preheader_constant_trip_count_align_4_not_known ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label 
%[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %a, i64 4000) ] @@ -1287,28 +999,8 @@ define void @deref_assumption_too_small_in_preheader_constant_trip_count_align_4 ; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %a, i64 3999) ] @@ -1376,27 +1068,8 @@ define void @may_free_align_deref_assumption_in_header_constant_trip_count_loop_ ; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4) ] @@ -1465,27 +1138,8 @@ define void @may_free_local_ptr_align_deref_assumption_in_header_constant_trip_c 
; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: %a = call ptr @get_ptr() @@ -1530,10 +1184,10 @@ define void @deref_assumption_in_header_constant_trip_count_nofree_via_context(p ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE2:.*]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP0]], align 4 -; CHECK-NEXT: [[TMP1:%.*]] = icmp slt <2 x i32> [[WIDE_LOAD]], zeroinitializer -; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i1> [[TMP1]], i32 0 -; CHECK-NEXT: br i1 [[TMP2]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x i32>, ptr [[TMP0]], align 4 +; CHECK-NEXT: [[TMP2:%.*]] = icmp slt <2 x i32> [[WIDE_LOAD1]], zeroinitializer +; CHECK-NEXT: [[TMP13:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0 +; CHECK-NEXT: br i1 [[TMP13]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] ; CHECK: [[PRED_LOAD_IF]]: ; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP3]] @@ -1542,7 +1196,7 @@ define void @deref_assumption_in_header_constant_trip_count_nofree_via_context(p ; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] ; CHECK: [[PRED_LOAD_CONTINUE]]: ; CHECK-NEXT: [[TMP7:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP6]], %[[PRED_LOAD_IF]] ] -; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP1]], i32 1 +; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1 ; CHECK-NEXT: br i1 [[TMP8]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2]] ; CHECK: [[PRED_LOAD_IF1]]: ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 1 @@ -1551,36 +1205,16 @@ define void @deref_assumption_in_header_constant_trip_count_nofree_via_context(p ; CHECK-NEXT: [[TMP12:%.*]] = insertelement <2 x i32> [[TMP7]], i32 [[TMP11]], i32 1 ; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE2]] ; CHECK: [[PRED_LOAD_CONTINUE2]]: -; CHECK-NEXT: [[TMP13:%.*]] = phi <2 x i32> [ [[TMP7]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP12]], %[[PRED_LOAD_IF1]] ] -; CHECK-NEXT: [[PREDPHI:%.*]] = 
select <2 x i1> [[TMP1]], <2 x i32> [[TMP13]], <2 x i32> [[WIDE_LOAD]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = phi <2 x i32> [ [[TMP7]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP12]], %[[PRED_LOAD_IF1]] ] +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP2]], <2 x i32> [[WIDE_LOAD]], <2 x i32> [[WIDE_LOAD1]] ; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]] ; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP14]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4000) ] @@ -1621,19 +1255,14 @@ define void @deref_assumption_in_header_constant_trip_count_may_free(ptr noalias ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE2:.*]] ] -; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_LOAD_CONTINUE2]] ] -; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[A]], <2 x i64> [[VEC_IND]] -; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x ptr> [[TMP0]], i32 0 -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP1]], i64 4), "dereferenceable"(ptr [[TMP1]], i64 4) ] -; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x ptr> [[TMP0]], i32 1 -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP2]], i64 4), "dereferenceable"(ptr [[TMP2]], i64 4) ] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP3]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = icmp slt <2 x i32> [[WIDE_LOAD]], zeroinitializer ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0 ; CHECK-NEXT: br i1 [[TMP5]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] ; CHECK: [[PRED_LOAD_IF]]: -; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x ptr> [[TMP0]], i32 0 +; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i32, 
ptr [[A]], i64 [[TMP17]] ; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4 ; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x i32> poison, i32 [[TMP7]], i32 0 ; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] @@ -1642,7 +1271,8 @@ define void @deref_assumption_in_header_constant_trip_count_may_free(ptr noalias ; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1 ; CHECK-NEXT: br i1 [[TMP10]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2]] ; CHECK: [[PRED_LOAD_IF1]]: -; CHECK-NEXT: [[TMP11:%.*]] = extractelement <2 x ptr> [[TMP0]], i32 1 +; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], 1 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP18]] ; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4 ; CHECK-NEXT: [[TMP13:%.*]] = insertelement <2 x i32> [[TMP9]], i32 [[TMP12]], i32 1 ; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE2]] @@ -1652,33 +1282,11 @@ define void @deref_assumption_in_header_constant_trip_count_may_free(ptr noalias ; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]] ; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP15]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 -; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2) ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 ; CHECK-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH:.*]]: -; CHECK-NEXT: br label %[[LOOP_HEADER:.*]] -; CHECK: [[LOOP_HEADER]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ] -; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[GEP_A]], i64 4), "dereferenceable"(ptr [[GEP_A]], i64 4) ] -; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4 -; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0 -; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]] -; CHECK: [[LOOP_THEN]]: -; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4 -; CHECK-NEXT: br label %[[LOOP_LATCH]] -; CHECK: [[LOOP_LATCH]]: -; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP_HEADER]] ] -; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP_HEADER]] -; CHECK: [[EXIT]]: -; CHECK-NEXT: ret void +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] ; entry: call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4000) ] @@ -1688,7 +1296,6 @@ entry: loop.header: %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ] %gep.a = getelementptr i32, ptr %a, i64 %iv - call void @llvm.assume(i1 true) [ "align"(ptr %gep.a, i64 4), "dereferenceable"(ptr %gep.a, i64 4) ] %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv %l.b = load i32, ptr %gep.b, align 4 %c.1 = icmp sge i32 %l.b, 0 @@ -1709,27 +1316,163 @@ loop.latch: exit: ret void } -;. 
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} -; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} -; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]} -; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]} -; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]} -; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]} -; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]} -; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]} -; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]} -; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]} -; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META2]], [[META1]]} -; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]} -; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]], [[META2]]} -; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]], [[META2]]} -; CHECK: [[LOOP15]] = distinct !{[[LOOP15]], [[META1]], [[META2]]} -; CHECK: [[LOOP16]] = distinct !{[[LOOP16]], [[META1]], [[META2]]} -; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]], [[META2]]} -; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META1]], [[META2]]} -; CHECK: [[LOOP19]] = distinct !{[[LOOP19]], [[META1]], [[META2]]} -; CHECK: [[LOOP20]] = distinct !{[[LOOP20]], [[META1]], [[META2]]} -; CHECK: [[LOOP21]] = distinct !{[[LOOP21]], [[META1]], [[META2]]} -;. + +define void @deref_assumption_in_header_constant_trip_count_nofree_via_context_but_missing_nosync(ptr noalias noundef %a, ptr noalias %b, ptr noalias %c) { +; CHECK-LABEL: define void @deref_assumption_in_header_constant_trip_count_nofree_via_context_but_missing_nosync( +; CHECK-SAME: ptr noalias noundef [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 4), "dereferenceable"(ptr [[A]], i64 4000) ] +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE2:.*]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP0]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = icmp slt <2 x i32> [[WIDE_LOAD]], zeroinitializer +; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i1> [[TMP1]], i32 0 +; CHECK-NEXT: br i1 [[TMP2]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; CHECK: [[PRED_LOAD_IF]]: +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> poison, i32 [[TMP5]], i32 0 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; CHECK: [[PRED_LOAD_CONTINUE]]: +; CHECK-NEXT: [[TMP7:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP6]], %[[PRED_LOAD_IF]] ] +; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP1]], i32 1 +; CHECK-NEXT: br i1 [[TMP8]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_IF1]]: +; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 1 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4 +; CHECK-NEXT: [[TMP12:%.*]] = insertelement 
<2 x i32> [[TMP7]], i32 [[TMP11]], i32 1 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE2]] +; CHECK: [[PRED_LOAD_CONTINUE2]]: +; CHECK-NEXT: [[TMP13:%.*]] = phi <2 x i32> [ [[TMP7]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP12]], %[[PRED_LOAD_IF1]] ] +; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP1]], <2 x i32> [[TMP13]], <2 x i32> [[WIDE_LOAD]] +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]] +; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP14]], align 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000 +; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br [[EXIT:label %.*]] +; CHECK: [[SCALAR_PH:.*:]] +; +entry: + call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4000) ] + br label %loop.header + +loop.header: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ] + %gep.a = getelementptr i32, ptr %a, i64 %iv + %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv + %l.b = load i32, ptr %gep.b, align 4 + %c.1 = icmp sge i32 %l.b, 0 + br i1 %c.1, label %loop.latch, label %loop.then + +loop.then: + %l.a = load i32, ptr %gep.a, align 4 + br label %loop.latch + +loop.latch: + %merge = phi i32 [ %l.a, %loop.then ], [ %l.b, %loop.header ] + %gep.c = getelementptr inbounds i32, ptr %c, i64 %iv + store i32 %merge, ptr %gep.c, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %ec = icmp eq i64 %iv.next, 1000 + br i1 %ec, label %exit, label %loop.header + +exit: + ret void +} + +define void @deref_assumption_in_header_constant_trip_count_multiple_loop_predecessors(ptr noalias noundef %a, ptr noalias %b, ptr noalias %c, i1 %pre) nosync { +; CHECK-LABEL: define void @deref_assumption_in_header_constant_trip_count_multiple_loop_predecessors( +; CHECK-SAME: ptr noalias noundef [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i1 [[PRE:%.*]]) #[[ATTR2]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[A]], i64 4), "dereferenceable"(ptr [[A]], i64 4000) ] +; CHECK-NEXT: br i1 [[PRE]], label %[[THEN:.*]], label %[[ELSE:.*]] +; CHECK: [[THEN]]: +; CHECK-NEXT: store i32 0, ptr [[A]], align 4 +; CHECK-NEXT: br label %[[LOOP_HEADER_PREHEADER:.*]] +; CHECK: [[ELSE]]: +; CHECK-NEXT: store i32 0, ptr [[B]], align 4 +; CHECK-NEXT: br label %[[LOOP_HEADER_PREHEADER]] +; CHECK: [[LOOP_HEADER_PREHEADER]]: +; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE2:.*]] ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP0]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = icmp slt <2 x i32> [[WIDE_LOAD]], zeroinitializer +; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i1> [[TMP1]], i32 0 +; CHECK-NEXT: br i1 [[TMP2]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]] +; CHECK: [[PRED_LOAD_IF]]: +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> poison, i32 [[TMP5]], i32 0 +; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]] +; CHECK: [[PRED_LOAD_CONTINUE]]: +; 
CHECK-NEXT: [[TMP7:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP6]], %[[PRED_LOAD_IF]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP1]], i32 1
+; CHECK-NEXT: br i1 [[TMP8]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2]]
+; CHECK: [[PRED_LOAD_IF1]]:
+; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[A]], i64 [[TMP9]]
+; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4
+; CHECK-NEXT: [[TMP12:%.*]] = insertelement <2 x i32> [[TMP7]], i32 [[TMP11]], i32 1
+; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE2]]
+; CHECK: [[PRED_LOAD_CONTINUE2]]:
+; CHECK-NEXT: [[TMP13:%.*]] = phi <2 x i32> [ [[TMP7]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP12]], %[[PRED_LOAD_IF1]] ]
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP1]], <2 x i32> [[TMP13]], <2 x i32> [[WIDE_LOAD]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]]
+; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP14]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br [[EXIT:label %.*]]
+; CHECK: [[SCALAR_PH:.*:]]
+;
+entry:
+ call void @llvm.assume(i1 true) [ "align"(ptr %a, i64 4), "dereferenceable"(ptr %a, i64 4000) ]
+ br i1 %pre, label %then, label %else
+
+then:
+ store i32 0, ptr %a
+ br label %loop.header
+
+else:
+ store i32 0, ptr %b
+ br label %loop.header
+
+loop.header:
+ %iv = phi i64 [ 0, %then ], [ 0, %else ], [ %iv.next, %loop.latch ]
+ %gep.a = getelementptr i32, ptr %a, i64 %iv
+ %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
+ %l.b = load i32, ptr %gep.b, align 4
+ %c.1 = icmp sge i32 %l.b, 0
+ br i1 %c.1, label %loop.latch, label %loop.then
+
+loop.then:
+ %l.a = load i32, ptr %gep.a, align 4
+ br label %loop.latch
+
+loop.latch:
+ %merge = phi i32 [ %l.a, %loop.then ], [ %l.b, %loop.header ]
+ %gep.c = getelementptr inbounds i32, ptr %c, i64 %iv
+ store i32 %merge, ptr %gep.c, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, 1000
+ br i1 %ec, label %exit, label %loop.header
+
+exit:
+ ret void
+}
+
+
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/xor-combined-opcode.ll b/llvm/test/Transforms/SLPVectorizer/X86/xor-combined-opcode.ll
index 7664fda..9cdcdf1 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/xor-combined-opcode.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/xor-combined-opcode.ll
@@ -6,7 +6,7 @@ define i1 @foo(i1 %v) { ; assume %v is 1
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i1> poison, i1 [[V]], i32 0
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <2 x i1> [[TMP0]], <2 x i1> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = mul <2 x i1> <i1 false, i1 true>, [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i1> zeroinitializer, [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i1> [[TMP2]], i32 0
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i1> [[TMP2]], i32 1
; CHECK-NEXT: [[SUB:%.*]] = sub i1 [[TMP3]], [[TMP4]]
diff --git a/llvm/test/tools/llvm-dwarfdump/verify_stmt_seq.yaml b/llvm/test/tools/llvm-dwarfdump/verify_stmt_seq.yaml
index 5312c25..17e91f1 100644
--- a/llvm/test/tools/llvm-dwarfdump/verify_stmt_seq.yaml
+++ b/llvm/test/tools/llvm-dwarfdump/verify_stmt_seq.yaml
@@ -2,9 +2,9 @@ # Then manually tampered with some of the values of the
attribute # I hope there are easier ways to construct tests like this. -# RUN: yaml2obj %s -o verify_stmt_seq.o -# RUN: not llvm-dwarfdump -verify -debug-info verify_stmt_seq.o | FileCheck %s --check-prefix=CHECK_INVALID --implicit-check-not=error: -# RUN: llvm-dwarfdump -debug-line -verbose -debug-info verify_stmt_seq.o | FileCheck %s --check-prefix=CHECK_DEBUG_LINE +# RUN: yaml2obj %s -o %t.o +# RUN: not llvm-dwarfdump -verify -debug-info %t.o | FileCheck %s --check-prefix=CHECK_INVALID --implicit-check-not=error: +# RUN: llvm-dwarfdump -debug-line -verbose -debug-info %t.o | FileCheck %s --check-prefix=CHECK_DEBUG_LINE # CHECK_INVALID: error: DW_AT_LLVM_stmt_sequence offset 0x00000000 is not within the line table bounds [0x00000034, 0x000000fd) # CHECK_INVALID: DW_AT_LLVM_stmt_sequence [DW_FORM_sec_offset] (0x00000000) diff --git a/llvm/test/tools/llvm-readobj/ELF/AMDGPU/offloading-fail.test b/llvm/test/tools/llvm-readobj/ELF/AMDGPU/offloading-fail.test new file mode 100644 index 0000000..391b7ee --- /dev/null +++ b/llvm/test/tools/llvm-readobj/ELF/AMDGPU/offloading-fail.test @@ -0,0 +1,26 @@ +## Test that --offloading with a fatbin works correctly. +# REQUIRES: amdgpu-registered-target + +# RUN: yaml2obj %s -o %t.elf +# RUN: llvm-readobj --offloading %t.elf 2>&1 | \ +# RUN: FileCheck %s --check-prefix=WARN -DFILE_NAME=%t.elf + +# WARN: warning: '{{.*}}': Stream Error: The stream is too short to perform the requested operation. + +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_EXEC +Sections: + - Name: .hip_fatbin + Type: SHT_PROGBITS + AddressAlign: 0x1000 + Content: 5F5F434C414E475F4F46464C4F41445F42554E444C455F5F0200000000000000001000000000000000000000000000001B0000000000000075782D2D0010000000000000D00F0000000000001F0000000000000068697076342D616D6467636E2D616D642D616D646873612D2D676678393038000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007F454C460201014003000000000000000300E0000100000000000000000000004000000000000000100C0000000000003005000040003800090040000F000D000600000004000000400000000000000040000000000000004000000000000000F801000000000000F80100000000000008000000000000000100000004000000000000000000000000000000000000000000000000000000C008000000000000C008000000000000001000000000000001000000050000000009000000000000001900000000000000190000000000006C000000000000006C00000000000000001000000000000001000000060000007009000000000000702900000000000070290000000000007000000000000000900600000000000000100000000000000100000006000000E009000000000000E039000000000000E039000000000000000000000000000001000000000000000010000000000000020000000600000070090000000000007029000000000000702900000000000070000000000000007000000000000000080000000000000052E57464040000007009000000000000702900000000000070290000000000007000000000000000900600000000000
0010000000000000051E57464060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000004000000380200000000000038020000000000003802000000000000340500000000000034050000000000000400000000000000070000001D05000020000000414D44475055000083AE616D646873612E6B65726E656C7391DE0012AB2E616770725F636F756E7400A52E61726773DC001085AE2E616464726573735F7370616365A6676C6F62616CA52E6E616D65AA415F642E636F65726365A72E6F666673657400A52E73697A6508AB2E76616C75655F6B696E64AD676C6F62616C5F62756666657285AE2E616464726573735F7370616365A6676C6F62616CA52E6E616D65AA425F642E636F65726365A72E6F666673657408A52E73697A6508AB2E76616C75655F6B696E64AD676C6F62616C5F62756666657284A52E6E616D65A14EA72E6F666673657410A52E73697A6508AB2E76616C75655F6B696E64A862795F76616C756583A72E6F666673657418A52E73697A6504AB2E76616C75655F6B696E64B468696464656E5F626C6F636B5F636F756E745F7883A72E6F66667365741CA52E73697A6504AB2E76616C75655F6B696E64B468696464656E5F626C6F636B5F636F756E745F7983A72E6F666673657420A52E73697A6504AB2E76616C75655F6B696E64B468696464656E5F626C6F636B5F636F756E745F7A83A72E6F666673657424A52E73697A6502AB2E76616C75655F6B696E64B368696464656E5F67726F75705F73697A655F7883A72E6F666673657426A52E73697A6502AB2E76616C75655F6B696E64B368696464656E5F67726F75705F73697A655F7983A72E6F666673657428A52E73697A6502AB2E76616C75655F6B696E64B368696464656E5F67726F75705F73697A655F7A83A72E6F66667365742AA52E73697A6502AB2E76616C75655F6B696E64B268696464656E5F72656D61696E6465725F7883A72E6F66667365742CA52E73697A6502AB2E76616C75655F6B696E64B268696464656E5F72656D61696E6465725F7983A72E6F66667365742EA52E73697A6502AB2E76616C75655F6B696E64B268696464656E5F72656D61696E6465725F7A83A72E6F666673657440A52E73697A6508AB2E76616C75655F6B696E64B668696464656E5F676C6F62616C5F6F66667365745F7883A72E6F666673657448A52E73697A6508AB2E76616C75655F6B696E64B668696464656E5F676C6F62616C5F6F66667365745F7983A72E6F666673657450A52E73697A6508AB2E76616C75655F6B696E64B668696464656E5F676C6F62616C5F6F66667365745F7A83A72E6F666673657458A52E73697A6502AB2E76616C75655F6B696E64B068696464656E5F677269645F64696D73B92E67726F75705F7365676D656E745F66697865645F73697A6500B62E6B65726E6172675F7365676D656E745F616C69676E08B52E6B65726E6172675F7365676D656E745F73697A65CD0118A92E6C616E6775616765A84F70656E434C2043B12E6C616E67756167655F76657273696F6E920200B82E6D61785F666C61745F776F726B67726F75705F73697A65CD0400A52E6E616D65B25F5A3973696D706C65416464506A504B6A6DBB2E707269766174655F7365676D656E745F66697865645F73697A6500AB2E736770725F636F756E740CB12E736770725F7370696C6C5F636F756E7400A72E73796D626F6CB55F5A3973696D706C65416464506A504B6A6D2E6B64B82E756E69666F726D5F776F726B5F67726F75705F73697A6501B32E757365735F64796E616D69635F737461636BC2AB2E766770725F636F756E7404B12E766770725F7370696C6C5F636F756E7400AF2E7761766566726F6E745F73697A6540AD616D646873612E746172676574B9616D6467636E2D616D642D616D646873612D2D676678393038AE616D646873612E76657273696F6E92010200000000000000000000000000000000000000000000000000000000000000010000001203070000190000000000006C000000000000001400000011030600800800000000000040000000000000002A00000011000A00E03900000000000001000000000000000100000001000000010000001A000000000008400000D20001000000360A4A7A5238A4D3F113F4DD04000000040000000200000001000000000000000300000000000000000000000000000000000000005F5A3973696D706C65416464506A504B6A6D005F5A3973696D706C65416464506A504B6A6D2E6B64005F5F6869705F637569645F6237303632643863333261346139333300000000000000000000000000000000000000000000000000000000000000000000001801000000000000801000000000000000000000000000000000000000000000000000000000000040
00AF008C000000090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000C20102C02400000002000AC0000000008002027E7FC08CBF07FF0486FFFF0000060406920600006800008FD2820002000302067E0200043203030638008050DC02007F020102067E0000003203030238008050DC00007F03700F8CBF03050468008070DC00027F00000081BF00000000060000000000000070070000000000000B000000000000001800000000000000050000000000000020080000000000000A000000000000004600000000000000F5FEFF6F00000000D0070000000000000400000000000000F807000000000000000000000000000000000000000000004C696E6B65723A20414D44204C4C442031392E302E3000414D4420636C616E672076657273696F6E2031392E302E306769742028202032343231322063393630313665636534313337356462646438663037356266333762643666633333323230376233290000414D4420636C616E672076657273696F6E2031382E302E3067697420287373683A2F2F6765727269746769742F6C696768746E696E672F65632F6C6C766D2D70726F6A65637420616D642D6D61696E6C696E652D6F70656E20323431373620663935303039613166393032313232343865313036333964653837653635636163616338643961372900000000000000000000000000000000000000000000000000460000000002080070290000000000000000000000000000010000001203070000190000000000006C000000000000001400000011030600800800000000000040000000000000002A00000011000A00E0390000000000000100000000000000002E6E6F7465002E64796E73796D002E676E752E68617368002E68617368002E64796E737472002E726F64617461002E74657874002E64796E616D6963002E72656C726F5F70616464696E67002E627373002E636F6D6D656E74002E73796D746162002E7368737472746162002E73747274616200005F5A3973696D706C65416464506A504B6A6D005F5A3973696D706C65416464506A504B6A6D2E6B64005F5F6869705F637569645F62373036326438633332613461393333005F44594E414D494300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000070000000200000000000000380200000000000038020000000000003405000000000000000000000000000004000000000000000000000000000000070000000B00000002000000000000007007000000000000700700000000000060000000000000000500000001000000080000000000000018000000000000000F000000F6FFFF6F0200000000000000D007000000000000D007000000000000280000000000000002000000000000000800000000000000000000000000000019000000050000000200000000000000F807000000000000F80700000000000028000000000000000200000000000000040000000000000004000000000000001F000000030000000200000000000000200800000000000020080000000000004600000000000000000000000000000001000000000000000000000000000000270000000100000002000000000000008008000000000000800800000000000040000000000000000000000000000000400000000000000000000000000000002F000000010000000600000000000000001900000000000000090000000000006C00000000000000000000000000000000010000000000000000000000000000350000000600000003000000000000007029000000000000700900000000000070000000000000000500000000000000080000000000000010000000000000003E000000080000000300000000000000E029000000000000E00900000000000020060000000000000000000000000000010000000000000000000000000000004D000000080000000300000000000000E039000000000000E0090000000000000100000000000000000000000000000001000000000000000000000000000000520000000100000030000000000000000000000000000000E009000000000000F0000000000000000000000000000000010000000000000001000000000000005B0000000200000000000000000000000000000000000000D00A00000000000078000000000000000E0000000200000008000000000000001800000000000000630000000300000000000000000000000000000000000000480B00000000000075000000000000000000000000000000010000000000000000000000000000006D00000003000000000000000000000000000
00000000000BD0B0000000000004F00000000000000000000000000000001000000000000000000000000000000 + - Name: .hipFatBinSegment + Type: SHT_PROGBITS + Flags: [ SHF_ALLOC ] + Address: 0x202FD0 + AddressAlign: 0x8 + Content: '465049480100000000102000000000000000000000000000' +... diff --git a/llvm/test/tools/llvm-readobj/ELF/AMDGPU/offloading.test b/llvm/test/tools/llvm-readobj/ELF/AMDGPU/offloading.test new file mode 100644 index 0000000..21ee60d --- /dev/null +++ b/llvm/test/tools/llvm-readobj/ELF/AMDGPU/offloading.test @@ -0,0 +1,27 @@ +## Test that --offloading with a fatbin works correctly. +# REQUIRES: amdgpu-registered-target + +# RUN: yaml2obj %s -o %t.elf +# RUN: llvm-readobj --offloading %t.elf | \ +# RUN: FileCheck %s -DFILE_NAME=%t.elf + +# CHECK: host-x86_64-unknown-linux-- file://[[FILE_NAME]]#offset=8192&size=0 +# CHECK-NEXT: hipv4-amdgcn-amd-amdhsa--gfx908 file://[[FILE_NAME]]#offset=8192&size=4048 + +--- !ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_EXEC +Sections: + - Name: .hip_fatbin + Type: SHT_PROGBITS + AddressAlign: 0x1000 + Content: 5F5F434C414E475F4F46464C4F41445F42554E444C455F5F0200000000000000001000000000000000000000000000001B00000000000000686F73742D7838365F36342D756E6B6E6F776E2D6C696E75782D2D0010000000000000D00F0000000000001F0000000000000068697076342D616D6467636E2D616D642D616D646873612D2D67667839303800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
[Content hex continues for several thousand more digits: zero padding followed by the embedded amdgcn-amd-amdhsa--gfx908 ELF code object for kernel _Z9simpleAddPjPKjm, built with AMD clang 19.0.0 and linked with AMD LLD 19.0.0; elided]
+  - Name:            .hipFatBinSegment
+    Type:            SHT_PROGBITS
+    Flags:           [ SHF_ALLOC ]
+    Address:         0x202FD0
+    AddressAlign:    0x8
+    Content:         '465049480100000000102000000000000000000000000000'
+...